Diffstat (limited to 'arch/ppc64')
-rw-r--r-- arch/ppc64/Kconfig | 33
-rw-r--r-- arch/ppc64/Makefile | 18
-rw-r--r-- arch/ppc64/boot/Makefile | 67
-rw-r--r-- arch/ppc64/boot/crt0.S | 53
-rw-r--r-- arch/ppc64/boot/install.sh | 2
-rw-r--r-- arch/ppc64/boot/main.c | 268
-rw-r--r-- arch/ppc64/boot/string.S | 4
-rw-r--r-- arch/ppc64/boot/string.h | 1
-rw-r--r-- arch/ppc64/boot/zImage.lds | 64
-rw-r--r-- arch/ppc64/boot/zlib.c | 2195
-rw-r--r-- arch/ppc64/boot/zlib.h | 432
-rw-r--r-- arch/ppc64/defconfig | 4
-rw-r--r-- arch/ppc64/kernel/HvCall.c | 36
-rw-r--r-- arch/ppc64/kernel/HvLpConfig.c | 27
-rw-r--r-- arch/ppc64/kernel/HvLpEvent.c | 88
-rw-r--r-- arch/ppc64/kernel/ItLpQueue.c | 262
-rw-r--r-- arch/ppc64/kernel/LparData.c | 227
-rw-r--r-- arch/ppc64/kernel/Makefile | 75
-rw-r--r-- arch/ppc64/kernel/align.c | 4
-rw-r--r-- arch/ppc64/kernel/asm-offsets.c | 3
-rw-r--r-- arch/ppc64/kernel/binfmt_elf32.c | 78
-rw-r--r-- arch/ppc64/kernel/bpa_iommu.c | 2
-rw-r--r-- arch/ppc64/kernel/bpa_setup.c | 7
-rw-r--r-- arch/ppc64/kernel/btext.c | 42
-rw-r--r-- arch/ppc64/kernel/cputable.c | 308
-rw-r--r-- arch/ppc64/kernel/eeh.c | 2
-rw-r--r-- arch/ppc64/kernel/entry.S | 845
-rw-r--r-- arch/ppc64/kernel/head.S | 290
-rw-r--r-- arch/ppc64/kernel/hvCall.S | 98
-rw-r--r-- arch/ppc64/kernel/hvcserver.c | 2
-rw-r--r-- arch/ppc64/kernel/i8259.c | 177
-rw-r--r-- arch/ppc64/kernel/i8259.h | 17
-rw-r--r-- arch/ppc64/kernel/iSeries_VpdInfo.c | 268
-rw-r--r-- arch/ppc64/kernel/iSeries_htab.c | 236
-rw-r--r-- arch/ppc64/kernel/iSeries_iommu.c | 176
-rw-r--r-- arch/ppc64/kernel/iSeries_irq.c | 353
-rw-r--r-- arch/ppc64/kernel/iSeries_pci.c | 905
-rw-r--r-- arch/ppc64/kernel/iSeries_proc.c | 113
-rw-r--r-- arch/ppc64/kernel/iSeries_setup.c | 977
-rw-r--r-- arch/ppc64/kernel/iSeries_setup.h | 26
-rw-r--r-- arch/ppc64/kernel/iSeries_smp.c | 149
-rw-r--r-- arch/ppc64/kernel/iSeries_vio.c | 155
-rw-r--r-- arch/ppc64/kernel/idle.c | 8
-rw-r--r-- arch/ppc64/kernel/idle_power4.S | 79
-rw-r--r-- arch/ppc64/kernel/init_task.c | 36
-rw-r--r-- arch/ppc64/kernel/ioctl32.c | 4
-rw-r--r-- arch/ppc64/kernel/kprobes.c | 1
-rw-r--r-- arch/ppc64/kernel/lmb.c | 299
-rw-r--r-- arch/ppc64/kernel/lparmap.c | 31
-rw-r--r-- arch/ppc64/kernel/maple_pci.c | 521
-rw-r--r-- arch/ppc64/kernel/maple_setup.c | 300
-rw-r--r-- arch/ppc64/kernel/maple_time.c | 175
-rw-r--r-- arch/ppc64/kernel/mf.c | 1281
-rw-r--r-- arch/ppc64/kernel/misc.S | 662
-rw-r--r-- arch/ppc64/kernel/mpic.c | 888
-rw-r--r-- arch/ppc64/kernel/mpic.h | 273
-rw-r--r-- arch/ppc64/kernel/of_device.c | 274
-rw-r--r-- arch/ppc64/kernel/pSeries_hvCall.S | 131
-rw-r--r-- arch/ppc64/kernel/pSeries_iommu.c | 590
-rw-r--r-- arch/ppc64/kernel/pSeries_lpar.c | 518
-rw-r--r-- arch/ppc64/kernel/pSeries_nvram.c | 148
-rw-r--r-- arch/ppc64/kernel/pSeries_pci.c | 143
-rw-r--r-- arch/ppc64/kernel/pSeries_reconfig.c | 426
-rw-r--r-- arch/ppc64/kernel/pSeries_setup.c | 622
-rw-r--r-- arch/ppc64/kernel/pSeries_smp.c | 517
-rw-r--r-- arch/ppc64/kernel/pSeries_vio.c | 273
-rw-r--r-- arch/ppc64/kernel/pci.c | 46
-rw-r--r-- arch/ppc64/kernel/pci.h | 54
-rw-r--r-- arch/ppc64/kernel/pci_direct_iommu.c | 3
-rw-r--r-- arch/ppc64/kernel/pci_dn.c | 3
-rw-r--r-- arch/ppc64/kernel/pci_iommu.c | 21
-rw-r--r-- arch/ppc64/kernel/pmac.h | 31
-rw-r--r-- arch/ppc64/kernel/pmac_feature.c | 767
-rw-r--r-- arch/ppc64/kernel/pmac_low_i2c.c | 523
-rw-r--r-- arch/ppc64/kernel/pmac_nvram.c | 495
-rw-r--r-- arch/ppc64/kernel/pmac_pci.c | 793
-rw-r--r-- arch/ppc64/kernel/pmac_setup.c | 525
-rw-r--r-- arch/ppc64/kernel/pmac_smp.c | 330
-rw-r--r-- arch/ppc64/kernel/pmac_time.c | 195
-rw-r--r-- arch/ppc64/kernel/pmc.c | 88
-rw-r--r-- arch/ppc64/kernel/ppc_ksyms.c | 20
-rw-r--r-- arch/ppc64/kernel/process.c | 713
-rw-r--r-- arch/ppc64/kernel/prom.c | 7
-rw-r--r-- arch/ppc64/kernel/prom_init.c | 1
-rw-r--r-- arch/ppc64/kernel/ptrace.c | 363
-rw-r--r-- arch/ppc64/kernel/ptrace32.c | 449
-rw-r--r-- arch/ppc64/kernel/ras.c | 353
-rw-r--r-- arch/ppc64/kernel/rtas-proc.c | 1
-rw-r--r-- arch/ppc64/kernel/rtas.c | 774
-rw-r--r-- arch/ppc64/kernel/rtas_pci.c | 9
-rw-r--r-- arch/ppc64/kernel/rtc.c | 48
-rw-r--r-- arch/ppc64/kernel/setup.c | 1316
-rw-r--r-- arch/ppc64/kernel/signal.c | 2
-rw-r--r-- arch/ppc64/kernel/signal32.c | 998
-rw-r--r-- arch/ppc64/kernel/smp.c | 40
-rw-r--r-- arch/ppc64/kernel/sys_ppc32.c | 1222
-rw-r--r-- arch/ppc64/kernel/syscalls.c | 263
-rw-r--r-- arch/ppc64/kernel/time.c | 881
-rw-r--r-- arch/ppc64/kernel/traps.c | 568
-rw-r--r-- arch/ppc64/kernel/u3_iommu.c | 349
-rw-r--r-- arch/ppc64/kernel/vdso64/sigtramp.S | 1
-rw-r--r-- arch/ppc64/kernel/vecemu.c | 346
-rw-r--r-- arch/ppc64/kernel/vector.S | 172
-rw-r--r-- arch/ppc64/kernel/vio.c | 261
-rw-r--r-- arch/ppc64/kernel/viopath.c | 673
-rw-r--r-- arch/ppc64/kernel/vmlinux.lds.S | 17
-rw-r--r-- arch/ppc64/kernel/xics.c | 747
-rw-r--r-- arch/ppc64/lib/Makefile | 15
-rw-r--r-- arch/ppc64/lib/checksum.S | 229
-rw-r--r-- arch/ppc64/lib/copypage.S | 121
-rw-r--r-- arch/ppc64/lib/copyuser.S | 576
-rw-r--r-- arch/ppc64/lib/e2a.c | 108
-rw-r--r-- arch/ppc64/lib/locks.c | 95
-rw-r--r-- arch/ppc64/lib/memcpy.S | 172
-rw-r--r-- arch/ppc64/lib/sstep.c | 141
-rw-r--r-- arch/ppc64/lib/strcase.c | 31
-rw-r--r-- arch/ppc64/lib/string.S | 106
-rw-r--r-- arch/ppc64/lib/usercopy.c | 41
-rw-r--r-- arch/ppc64/mm/Makefile | 11
-rw-r--r-- arch/ppc64/mm/fault.c | 333
-rw-r--r-- arch/ppc64/mm/hash_low.S | 288
-rw-r--r-- arch/ppc64/mm/hash_native.c | 453
-rw-r--r-- arch/ppc64/mm/hash_utils.c | 438
-rw-r--r-- arch/ppc64/mm/hugetlbpage.c | 745
-rw-r--r-- arch/ppc64/mm/imalloc.c | 312
-rw-r--r-- arch/ppc64/mm/init.c | 950
-rw-r--r-- arch/ppc64/mm/mmap.c | 86
-rw-r--r-- arch/ppc64/mm/numa.c | 779
-rw-r--r-- arch/ppc64/mm/slb.c | 158
-rw-r--r-- arch/ppc64/mm/slb_low.S | 151
-rw-r--r-- arch/ppc64/mm/stab.c | 279
-rw-r--r-- arch/ppc64/mm/tlb.c | 197
-rw-r--r-- arch/ppc64/oprofile/Kconfig | 23
-rw-r--r-- arch/ppc64/oprofile/Makefile | 9
-rw-r--r-- arch/ppc64/oprofile/common.c | 145
-rw-r--r-- arch/ppc64/oprofile/op_model_power4.c | 309
-rw-r--r-- arch/ppc64/oprofile/op_model_rs64.c | 218
-rw-r--r-- arch/ppc64/xmon/Makefile | 5
-rw-r--r-- arch/ppc64/xmon/ansidecl.h | 141
-rw-r--r-- arch/ppc64/xmon/nonstdio.h | 22
-rw-r--r-- arch/ppc64/xmon/ppc-dis.c | 184
-rw-r--r-- arch/ppc64/xmon/ppc-opc.c | 4621
-rw-r--r-- arch/ppc64/xmon/ppc.h | 307
-rw-r--r-- arch/ppc64/xmon/setjmp.S | 73
-rw-r--r-- arch/ppc64/xmon/start.c | 187
-rw-r--r-- arch/ppc64/xmon/subr_prf.c | 55
-rw-r--r-- arch/ppc64/xmon/xmon.c | 2514
147 files changed, 517 insertions, 46868 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index c658650af429..42677cc96508 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -10,6 +10,9 @@ config MMU
 	bool
 	default y
 
+config PPC_STD_MMU
+	def_bool y
+
 config UID16
 	bool
 
@@ -120,6 +123,11 @@ config MPIC
 	bool
 	default y
 
+config PPC_I8259
+	depends on PPC_PSERIES
+	bool
+	default y
+
 config BPA_IIC
 	depends on PPC_BPA
 	bool
@@ -186,6 +194,12 @@ config BOOTX_TEXT
 	  Say Y here to see progress messages from the boot firmware in text
 	  mode. Requires an Open Firmware compatible video card.
 
+config POWER4
+	def_bool y
+
+config PPC_FPU
+	def_bool y
+
 config POWER4_ONLY
 	bool "Optimize for POWER4"
 	default n
@@ -234,6 +248,10 @@ config HMT
 	  This option enables hardware multithreading on RS64 cpus.
 	  pSeries systems p620 and p660 have such a cpu type.
 
+config NUMA
+	bool "NUMA support"
+	default y if SMP && PPC_PSERIES
+
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
@@ -249,9 +267,6 @@ config ARCH_DISCONTIGMEM_DEFAULT
 	def_bool y
 	depends on ARCH_DISCONTIGMEM_ENABLE
 
-config ARCH_FLATMEM_ENABLE
-	def_bool y
-
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on ARCH_DISCONTIGMEM_ENABLE
@@ -274,10 +289,6 @@ config NODES_SPAN_OTHER_NODES
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config NUMA
-	bool "NUMA support"
-	default y if DISCONTIGMEM || SPARSEMEM
-
 config SCHED_SMT
 	bool "SMT (Hyperthreading) scheduler support"
 	depends on SMP
@@ -307,6 +318,11 @@ config PPC_RTAS
 	depends on PPC_PSERIES || PPC_BPA
 	default y
 
+config RTAS_ERROR_LOGGING
+	bool
+	depends on PPC_RTAS
+	default y
+
 config RTAS_PROC
 	bool "Proc interface to RTAS"
 	depends on PPC_RTAS
@@ -357,7 +373,6 @@ config HOTPLUG_CPU
 
 config PROC_DEVICETREE
 	bool "Support for Open Firmware device tree in /proc"
-	depends on !PPC_ISERIES
 	help
 	  This option adds a device-tree directory under /proc which contains
 	  an image of the device tree that the kernel copies from Open
@@ -461,7 +476,7 @@ config VIOPATH
 	depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
 	default y
 
-source "arch/ppc64/oprofile/Kconfig"
+source "arch/powerpc/oprofile/Kconfig"
 
 source "arch/ppc64/Kconfig.debug"
 
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 521c2a5a2862..fdbd6f44adc0 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -75,17 +75,25 @@ else
 	CFLAGS += $(call cc-option,-mtune=power4)
 endif
 
+# No AltiVec instruction when building kernel
+CFLAGS += $(call cc-option, -mno-altivec)
+
 # Enable unit-at-a-time mode when possible. It shrinks the
 # kernel considerably.
 CFLAGS += $(call cc-option,-funit-at-a-time)
 
 head-y := arch/ppc64/kernel/head.o
+head-y += arch/powerpc/kernel/fpu.o
+head-y += arch/powerpc/kernel/entry_64.o
 
 libs-y				+= arch/ppc64/lib/
-core-y				+= arch/ppc64/kernel/
-core-y				+= arch/ppc64/mm/
-core-$(CONFIG_XMON)		+= arch/ppc64/xmon/
-drivers-$(CONFIG_OPROFILE)	+= arch/ppc64/oprofile/
+core-y				+= arch/ppc64/kernel/ arch/powerpc/kernel/
+core-y				+= arch/powerpc/mm/
+core-y				+= arch/powerpc/sysdev/
+core-y				+= arch/powerpc/platforms/
+core-y				+= arch/powerpc/lib/
+core-$(CONFIG_XMON)		+= arch/powerpc/xmon/
+drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
 
 boot := arch/ppc64/boot
 
@@ -100,7 +108,7 @@ $(boottargets-y): vmlinux
 bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
 bootimage-$(CONFIG_PPC_PMAC) := vmlinux
 bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
-bootimage-$(CONFIG_PPC_BPA) := zImage
+bootimage-$(CONFIG_PPC_BPA) := $(boot)/zImage
 bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
 BOOTIMAGE := $(bootimage-y)
 install: vmlinux
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index 33fdc8710891..301bc1536c49 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -22,15 +22,46 @@
 
 
 HOSTCC		:= gcc
-BOOTCFLAGS	:= $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include)
+BOOTCFLAGS	:= $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include) -fPIC
 BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
-BOOTLFLAGS	:= -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds
+BOOTLFLAGS	:= -T $(srctree)/$(src)/zImage.lds
 OBJCOPYFLAGS    := contents,alloc,load,readonly,data
 
-src-boot := crt0.S string.S prom.c main.c zlib.c imagesize.c div64.S
+zlib       := infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c
+zlibheader := infblock.h infcodes.h inffast.h inftrees.h infutil.h
+zliblinuxheader := zlib.h zconf.h zutil.h
+
+$(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
+#$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
+
+src-boot := string.S prom.c main.c div64.S crt0.S
+src-boot += $(zlib)
 src-boot := $(addprefix $(obj)/, $(src-boot))
 obj-boot := $(addsuffix .o, $(basename $(src-boot)))
 
+BOOTCFLAGS	+= -I$(obj) -I$(srctree)/$(obj)
+
+quiet_cmd_copy_zlib = COPY    $@
+      cmd_copy_zlib = sed "s@__attribute_used__@@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
+
+quiet_cmd_copy_zlibheader = COPY    $@
+      cmd_copy_zlibheader = sed "s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
+# stddef.h for NULL
+quiet_cmd_copy_zliblinuxheader = COPY    $@
+      cmd_copy_zliblinuxheader = sed "s@<linux/string.h>@\"string.h\"@;s@<linux/kernel.h>@<stddef.h>@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
+
+$(addprefix $(obj)/,$(zlib)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
+	$(call cmd,copy_zlib)
+
+$(addprefix $(obj)/,$(zlibheader)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
+	$(call cmd,copy_zlibheader)
+
+$(addprefix $(obj)/,$(zliblinuxheader)): $(obj)/%: $(srctree)/include/linux/%
+	$(call cmd,copy_zliblinuxheader)
+
+clean-files := $(zlib) $(zlibheader) $(zliblinuxheader)
+
+
 quiet_cmd_bootcc = BOOTCC  $@
       cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
 
@@ -56,7 +87,7 @@ src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
 gz-sec  = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
 
 hostprogs-y		:= addnote addRamDisk
-targets 		+= zImage.vmode zImage.initrd.vmode zImage zImage.initrd imagesize.c \
+targets 		+= zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
 		   $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
 		   $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
 		   $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
@@ -69,9 +100,9 @@ quiet_cmd_ramdisk = RAMDISK $@
 quiet_cmd_stripvm = STRIP   $@
       cmd_stripvm = $(STRIP) -s $< -o $@
 
-vmlinux.strip: vmlinux FORCE
+vmlinux.strip: vmlinux
 	$(call if_changed,stripvm)
-$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz FORCE
+$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz
 	$(call if_changed,ramdisk)
 
 quiet_cmd_addsection = ADDSEC  $@
@@ -79,48 +110,38 @@ quiet_cmd_addsection = ADDSEC  $@
 		--add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \
 		--set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS)
 
-quiet_cmd_imagesize = GENSIZE $@
-      cmd_imagesize = ls -l vmlinux.strip | \
-		awk '{printf "/* generated -- do not edit! */\n" "unsigned long vmlinux_filesize = %d;\n", $$5}' \
-		> $(obj)/imagesize.c && \
-		$(CROSS_COMPILE)nm -n vmlinux | tail -n 1 | \
-		awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' >> $(obj)/imagesize.c
-
 quiet_cmd_addnote = ADDNOTE $@
       cmd_addnote = $(obj)/addnote $@
 
-$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE
+$(call gz-sec, $(required)): $(obj)/kernel-%.gz: %
 	$(call if_changed,gzip)
 
 $(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz
 	cp -f $(obj)/ramdisk.image.gz $@
 
-$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE
+$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz
 	@touch $@
 
-$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE
+$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c
 	$(call if_changed_dep,bootcc)
 	$(call cmd,addsection)
 
 $(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
-$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) FORCE
+$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.lds
 	$(call cmd,bootld,$(obj-boot))
 
 $(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
-$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) FORCE
+$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(srctree)/$(src)/zImage.lds
 	$(call cmd,bootld,$(obj-boot))
 
-$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote FORCE
+$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote
 	@cp -f $< $@
 	$(call if_changed,addnote)
 
-$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote FORCE
+$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
 	@cp -f $< $@
 	$(call if_changed,addnote)
 
-$(obj)/imagesize.c: vmlinux.strip
-	$(call cmd,imagesize)
-
 install: $(CONFIGURE) $(BOOTIMAGE)
 	sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
 
diff --git a/arch/ppc64/boot/crt0.S b/arch/ppc64/boot/crt0.S
index 3861e7f9cf19..9cc442263939 100644
--- a/arch/ppc64/boot/crt0.S
+++ b/arch/ppc64/boot/crt0.S
@@ -12,11 +12,40 @@
 #include "ppc_asm.h"
 
 	.text
-	.globl _start
-_start:
+	.globl _zimage_start
+_zimage_start:
+	bl reloc_offset
+
+reloc_offset:
+	mflr r0
+	lis r9,reloc_offset@ha
+	addi r9,r9,reloc_offset@l
+	subf. r0,r9,r0
+	beq clear_caches
+
+reloc_got2:
+	lis r9,__got2_start@ha
+	addi r9,r9,__got2_start@l
+	lis r8,__got2_end@ha
+	addi r8,r8,__got2_end@l
+	subf. r8,r9,r8
+	beq clear_caches
+	srwi. r8,r8,2
+	mtctr r8
+	add r9,r0,r9
+reloc_got2_loop:
+	lwz r8,0(r9)
+	add r8,r8,r0
+	stw r8,0(r9)
+	addi r9,r9,4
+	bdnz reloc_got2_loop
+
+clear_caches:
 	lis r9,_start@h
+	add r9,r0,r9
 	lis r8,_etext@ha
 	addi r8,r8,_etext@l
+	add r8,r0,r8
 1:	dcbf r0,r9
 	icbi r0,r9
 	addi r9,r9,0x20
@@ -25,24 +54,6 @@ _start:
 	sync
 	isync
 
-	## Clear out the BSS as per ANSI C requirements
-
-	lis r7,_end@ha
-	addi r7,r7,_end@l	# r7 = &_end
-	lis r8,__bss_start@ha	#
-	addi r8,r8,__bss_start@l # r8 = &_bss_start
-
-	## Determine how large an area, in number of words, to clear
-
-	subf r7,r8,r7		# r7 = &_end - &_bss_start + 1
-	addi r7,r7,3		# r7 += 3
-	srwi. r7,r7,2		# r7 = size in words.
-	beq 3f			# If the size is zero, don't bother
-	addi r8,r8,-4		# r8 -= 4
-	mtctr r7		# SPRN_CTR = number of words to clear
-	li r0,0			# r0 = 0
-2:	stwu r0,4(r8)		# Clear out a word
-	bdnz 2b			# Keep clearing until done
-3:
+	mr r6,r1
 	b start
 
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index cb2d6626b555..eacce9590816 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -28,7 +28,7 @@ if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}i
 # Default install
 
 # this should work for both the pSeries zImage and the iSeries vmlinux.sm
-image_name=`basename $5`
+image_name=`basename $2`
 
 if [ -f $4/$image_name ]; then
 	mv $4/$image_name $4/$image_name.old
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index f7ec19a2d0b0..c1dc876bccab 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -17,7 +17,6 @@
 #include "prom.h"
 #include "zlib.h"
 
-static void gunzip(void *, int, unsigned char *, int *);
 extern void flush_cache(void *, unsigned long);
 
 
@@ -26,31 +25,26 @@ extern void flush_cache(void *, unsigned long);
 #define RAM_END		(512<<20) // Fixme: use OF */
 #define	ONE_MB		0x100000
 
-static char *avail_ram;
-static char *begin_avail, *end_avail;
-static char *avail_high;
-static unsigned int heap_use;
-static unsigned int heap_max;
-
 extern char _start[];
+extern char __bss_start[];
 extern char _end[];
 extern char _vmlinux_start[];
 extern char _vmlinux_end[];
 extern char _initrd_start[];
 extern char _initrd_end[];
-extern unsigned long vmlinux_filesize;
-extern unsigned long vmlinux_memsize;
 
 struct addr_range {
 	unsigned long addr;
 	unsigned long size;
 	unsigned long memsize;
 };
-static struct addr_range vmlinux = {0, 0, 0};
-static struct addr_range vmlinuz = {0, 0, 0};
-static struct addr_range initrd = {0, 0, 0};
+static struct addr_range vmlinux;
+static struct addr_range vmlinuz;
+static struct addr_range initrd;
+
+static char scratch[46912];	/* scratch space for gunzip, from zlib_inflate_workspacesize() */
+static char elfheader[256];
 
-static char scratch[128<<10];	/* 128kB of scratch space for gunzip */
 
 typedef void (*kernel_entry_t)( unsigned long,
                                 unsigned long,
@@ -62,6 +56,63 @@ typedef void (*kernel_entry_t)( unsigned long,
 
 static unsigned long claim_base;
 
+#define	HEAD_CRC	2
+#define	EXTRA_FIELD	4
+#define	ORIG_NAME	8
+#define	COMMENT		0x10
+#define	RESERVED	0xe0
+
+static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
+{
+	z_stream s;
+	int r, i, flags;
+
+	/* skip header */
+	i = 10;
+	flags = src[3];
+	if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
+		printf("bad gzipped data\n\r");
+		exit();
+	}
+	if ((flags & EXTRA_FIELD) != 0)
+		i = 12 + src[10] + (src[11] << 8);
+	if ((flags & ORIG_NAME) != 0)
+		while (src[i++] != 0)
+			;
+	if ((flags & COMMENT) != 0)
+		while (src[i++] != 0)
+			;
+	if ((flags & HEAD_CRC) != 0)
+		i += 2;
+	if (i >= *lenp) {
+		printf("gunzip: ran out of data in header\n\r");
+		exit();
+	}
+
+	if (zlib_inflate_workspacesize() > sizeof(scratch)) {
+		printf("gunzip needs more mem\n");
+		exit();
+	}
+	memset(&s, 0, sizeof(s));
+	s.workspace = scratch;
+	r = zlib_inflateInit2(&s, -MAX_WBITS);
+	if (r != Z_OK) {
+		printf("inflateInit2 returned %d\n\r", r);
+		exit();
+	}
+	s.next_in = src + i;
+	s.avail_in = *lenp - i;
+	s.next_out = dst;
+	s.avail_out = dstlen;
+	r = zlib_inflate(&s, Z_FULL_FLUSH);
+	if (r != Z_OK && r != Z_STREAM_END) {
+		printf("inflate returned %d msg: %s\n\r", r, s.msg);
+		exit();
+	}
+	*lenp = s.next_out - (unsigned char *) dst;
+	zlib_inflateEnd(&s);
+}
+
 static unsigned long try_claim(unsigned long size)
 {
 	unsigned long addr = 0;
@@ -80,13 +131,16 @@ static unsigned long try_claim(unsigned long size)
 	return addr;
 }
 
-void start(unsigned long a1, unsigned long a2, void *promptr)
+void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
 {
 	unsigned long i;
+	int len;
 	kernel_entry_t kernel_entry;
 	Elf64_Ehdr *elf64;
 	Elf64_Phdr *elf64ph;
 
+	memset(__bss_start, 0, _end - __bss_start);
+
 	prom = (int (*)(void *)) promptr;
 	chosen_handle = finddevice("/chosen");
 	if (chosen_handle == (void *) -1)
@@ -97,7 +151,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
 	if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
 		exit();
 
-	printf("\n\rzImage starting: loaded at 0x%lx\n\r", (unsigned long) _start);
+	printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
 
 	/*
 	 * The first available claim_base must be above the end of the
@@ -118,25 +172,45 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
 	claim_base = PROG_START;
 #endif
 
-	/*
-	 * Now we try to claim some memory for the kernel itself
-	 * our "vmlinux_memsize" is the memory footprint in RAM, _HOWEVER_, what
-	 * our Makefile stuffs in is an image containing all sort of junk including
-	 * an ELF header. We need to do some calculations here to find the right
-	 * size... In practice we add 1Mb, that is enough, but we should really
-	 * consider fixing the Makefile to put a _raw_ kernel in there !
-	 */
-	vmlinux_memsize += ONE_MB;
-	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize);
-	vmlinux.addr = try_claim(vmlinux_memsize);
+	vmlinuz.addr = (unsigned long)_vmlinux_start;
+	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
+
+	/* gunzip the ELF header of the kernel */
+	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
+		len = vmlinuz.size;
+		gunzip(elfheader, sizeof(elfheader),
+				(unsigned char *)vmlinuz.addr, &len);
+	} else
+		memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));
+
+	elf64 = (Elf64_Ehdr *)elfheader;
+	if ( elf64->e_ident[EI_MAG0]  != ELFMAG0	||
+	     elf64->e_ident[EI_MAG1]  != ELFMAG1	||
+	     elf64->e_ident[EI_MAG2]  != ELFMAG2	||
+	     elf64->e_ident[EI_MAG3]  != ELFMAG3	||
+	     elf64->e_ident[EI_CLASS] != ELFCLASS64	||
+	     elf64->e_ident[EI_DATA]  != ELFDATA2MSB	||
+	     elf64->e_type            != ET_EXEC	||
+	     elf64->e_machine         != EM_PPC64 )
+	{
+		printf("Error: not a valid PPC64 ELF file!\n\r");
+		exit();
+	}
+
+	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
+				(unsigned long)elf64->e_phoff);
+	for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
+		if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
+			break;
+	}
+	vmlinux.size = (unsigned long)elf64ph->p_filesz;
+	vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
+	printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
+	vmlinux.addr = try_claim(vmlinux.memsize);
 	if (vmlinux.addr == 0) {
 		printf("Can't allocate memory for kernel image !\n\r");
 		exit();
 	}
-	vmlinuz.addr = (unsigned long)_vmlinux_start;
-	vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
-	vmlinux.size = PAGE_ALIGN(vmlinux_filesize);
-	vmlinux.memsize = vmlinux_memsize;
 
 	/*
 	 * Now we try to claim memory for the initrd (and copy it there)
@@ -160,49 +234,22 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
 
 	/* Eventually gunzip the kernel */
 	if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
-		int len;
-		avail_ram = scratch;
-		begin_avail = avail_high = avail_ram;
-		end_avail = scratch + sizeof(scratch);
 		printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
 		       vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
 		len = vmlinuz.size;
-		gunzip((void *)vmlinux.addr, vmlinux.size,
+		gunzip((void *)vmlinux.addr, vmlinux.memsize,
 			(unsigned char *)vmlinuz.addr, &len);
 		printf("done 0x%lx bytes\n\r", len);
-		printf("0x%x bytes of heap consumed, max in use 0x%x\n\r",
-		       (unsigned)(avail_high - begin_avail), heap_max);
 	} else {
 		memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
 	}
 
 	/* Skip over the ELF header */
-	elf64 = (Elf64_Ehdr *)vmlinux.addr;
-	if ( elf64->e_ident[EI_MAG0]  != ELFMAG0	||
-	     elf64->e_ident[EI_MAG1]  != ELFMAG1	||
-	     elf64->e_ident[EI_MAG2]  != ELFMAG2	||
-	     elf64->e_ident[EI_MAG3]  != ELFMAG3	||
-	     elf64->e_ident[EI_CLASS] != ELFCLASS64	||
-	     elf64->e_ident[EI_DATA]  != ELFDATA2MSB	||
-	     elf64->e_type            != ET_EXEC	||
-	     elf64->e_machine         != EM_PPC64 )
-	{
-		printf("Error: not a valid PPC64 ELF file!\n\r");
-		exit();
-	}
-
-	elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
-				(unsigned long)elf64->e_phoff);
-	for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
-		if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
-			break;
-	}
 #ifdef DEBUG
 	printf("... skipping 0x%lx bytes of ELF header\n\r",
 			(unsigned long)elf64ph->p_offset);
 #endif
 	vmlinux.addr += (unsigned long)elf64ph->p_offset;
-	vmlinux.size -= (unsigned long)elf64ph->p_offset;
 
 	flush_cache((void *)vmlinux.addr, vmlinux.size);
 
@@ -225,108 +272,3 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
 	exit();
 }
 
-struct memchunk {
-	unsigned int size;
-	unsigned int pad;
-	struct memchunk *next;
-};
-
-static struct memchunk *freechunks;
-
-void *zalloc(void *x, unsigned items, unsigned size)
-{
-	void *p;
-	struct memchunk **mpp, *mp;
-
-	size *= items;
-	size = _ALIGN(size, sizeof(struct memchunk));
-	heap_use += size;
-	if (heap_use > heap_max)
-		heap_max = heap_use;
-	for (mpp = &freechunks; (mp = *mpp) != 0; mpp = &mp->next) {
-		if (mp->size == size) {
-			*mpp = mp->next;
-			return mp;
-		}
-	}
-	p = avail_ram;
-	avail_ram += size;
-	if (avail_ram > avail_high)
-		avail_high = avail_ram;
-	if (avail_ram > end_avail) {
-		printf("oops... out of memory\n\r");
-		pause();
-	}
-	return p;
-}
-
-void zfree(void *x, void *addr, unsigned nb)
-{
-	struct memchunk *mp = addr;
-
-	nb = _ALIGN(nb, sizeof(struct memchunk));
-	heap_use -= nb;
-	if (avail_ram == addr + nb) {
-		avail_ram = addr;
-		return;
-	}
-	mp->size = nb;
-	mp->next = freechunks;
-	freechunks = mp;
-}
-
-#define HEAD_CRC	2
-#define EXTRA_FIELD	4
-#define ORIG_NAME	8
-#define COMMENT		0x10
-#define RESERVED	0xe0
-
-#define DEFLATED	8
-
-static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
-{
-	z_stream s;
-	int r, i, flags;
-
-	/* skip header */
-	i = 10;
-	flags = src[3];
-	if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
-		printf("bad gzipped data\n\r");
-		exit();
-	}
-	if ((flags & EXTRA_FIELD) != 0)
-		i = 12 + src[10] + (src[11] << 8);
-	if ((flags & ORIG_NAME) != 0)
-		while (src[i++] != 0)
-			;
-	if ((flags & COMMENT) != 0)
-		while (src[i++] != 0)
-			;
-	if ((flags & HEAD_CRC) != 0)
-		i += 2;
-	if (i >= *lenp) {
-		printf("gunzip: ran out of data in header\n\r");
-		exit();
-	}
-
-	s.zalloc = zalloc;
-	s.zfree = zfree;
-	r = inflateInit2(&s, -MAX_WBITS);
-	if (r != Z_OK) {
-		printf("inflateInit2 returned %d\n\r", r);
-		exit();
-	}
-	s.next_in = src + i;
-	s.avail_in = *lenp - i;
-	s.next_out = dst;
-	s.avail_out = dstlen;
-	r = inflate(&s, Z_FINISH);
-	if (r != Z_OK && r != Z_STREAM_END) {
-		printf("inflate returned %d msg: %s\n\r", r, s.msg);
-		exit();
-	}
-	*lenp = s.next_out - (unsigned char *) dst;
-	inflateEnd(&s);
-}
-
diff --git a/arch/ppc64/boot/string.S b/arch/ppc64/boot/string.S
index 7ade87ae7718..b1eeaed7db17 100644
--- a/arch/ppc64/boot/string.S
+++ b/arch/ppc64/boot/string.S
@@ -104,7 +104,7 @@ memmove:
 
 	.globl	memcpy
 memcpy:
-	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
+	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
 	addi	r6,r3,-4
 	addi	r4,r4,-4
 	beq	2f			/* if less than 8 bytes to do */
@@ -146,7 +146,7 @@ memcpy:
 
 	.globl	backwards_memcpy
 backwards_memcpy:
-	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
+	rlwinm.	r7,r5,32-3,3,31		/* r7 = r5 >> 3 */
 	add	r6,r3,r5
 	add	r4,r4,r5
 	beq	2f
diff --git a/arch/ppc64/boot/string.h b/arch/ppc64/boot/string.h
index 9289258bcbd6..9fdff1cc0d70 100644
--- a/arch/ppc64/boot/string.h
+++ b/arch/ppc64/boot/string.h
@@ -1,5 +1,6 @@
 #ifndef _PPC_BOOT_STRING_H_
 #define _PPC_BOOT_STRING_H_
+#include <stddef.h>
 
 extern char *strcpy(char *dest, const char *src);
 extern char *strncpy(char *dest, const char *src, size_t n);
diff --git a/arch/ppc64/boot/zImage.lds b/arch/ppc64/boot/zImage.lds
index 8fe5e7071f54..4b6bb3ffe3dc 100644
--- a/arch/ppc64/boot/zImage.lds
+++ b/arch/ppc64/boot/zImage.lds
@@ -1,62 +1,24 @@
 OUTPUT_ARCH(powerpc:common)
-SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/local/powerpc-any-elf/lib);
-/* Do we need any of these for elf?
-   __DYNAMIC = 0;    */
+ENTRY(_zimage_start)
 SECTIONS
 {
-  /* Read-only sections, merged into text segment: */
-  . = + SIZEOF_HEADERS;
-  .interp : { *(.interp) }
-  .hash : { *(.hash) }
-  .dynsym : { *(.dynsym) }
-  .dynstr : { *(.dynstr) }
-  .rel.text : { *(.rel.text) }
-  .rela.text : { *(.rela.text) }
-  .rel.data : { *(.rel.data) }
-  .rela.data : { *(.rela.data) }
-  .rel.rodata : { *(.rel.rodata) }
-  .rela.rodata : { *(.rela.rodata) }
-  .rel.got : { *(.rel.got) }
-  .rela.got : { *(.rela.got) }
-  .rel.ctors : { *(.rel.ctors) }
-  .rela.ctors : { *(.rela.ctors) }
-  .rel.dtors : { *(.rel.dtors) }
-  .rela.dtors : { *(.rela.dtors) }
-  .rel.bss : { *(.rel.bss) }
-  .rela.bss : { *(.rela.bss) }
-  .rel.plt : { *(.rel.plt) }
-  .rela.plt : { *(.rela.plt) }
-  .plt : { *(.plt) }
+  . = (4*1024*1024);
+  _start = .;
   .text      :
   {
     *(.text)
     *(.fixup)
-    *(.got1)
   }
-  . = ALIGN(4096);
   _etext = .;
-  PROVIDE (etext = .);
-  .rodata    :
-  {
-    *(.rodata)
-    *(.rodata1)
-  }
-  .kstrtab   : { *(.kstrtab) }
-  __vermagic : { *(__vermagic) }
-  .fini      : { *(.fini)    } =0
-  .ctors     : { *(.ctors)   }
-  .dtors     : { *(.dtors)   }
-  /* Read-write section, merged into data segment: */
   . = ALIGN(4096);
   .data    :
   {
-    *(.data)
-    *(.data1)
-    *(.sdata)
-    *(.sdata2)
-    *(.got.plt) *(.got)
-    *(.dynamic)
-    CONSTRUCTORS
+    *(.rodata*)
+    *(.data*)
+    *(.sdata*)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
   }
 
   . = ALIGN(4096);
@@ -71,20 +33,14 @@ SECTIONS
 
   . = ALIGN(4096);
   _edata  =  .;
-  PROVIDE (edata = .);
-
-  .fixup   : { *(.fixup) }
 
   . = ALIGN(4096);
  __bss_start = .;
  .bss       :
  {
-   *(.sbss) *(.scommon)
-   *(.dynbss)
+   *(.sbss)
    *(.bss)
-   *(COMMON)
  }
  . = ALIGN(4096);
  _end = . ;
-  PROVIDE (end = .);
 }
diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
deleted file mode 100644
index 0d910cd2079d..000000000000
--- a/arch/ppc64/boot/zlib.c
+++ /dev/null
@@ -1,2195 +0,0 @@
1/*
2 * This file is derived from various .h and .c files from the zlib-0.95
3 * distribution by Jean-loup Gailly and Mark Adler, with some additions
4 * by Paul Mackerras to aid in implementing Deflate compression and
5 * decompression for PPP packets. See zlib.h for conditions of
6 * distribution and use.
7 *
8 * Changes that have been made include:
9 * - changed functions not used outside this file to "local"
10 * - added minCompression parameter to deflateInit2
11 * - added Z_PACKET_FLUSH (see zlib.h for details)
12 * - added inflateIncomp
13 *
14 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
15
16 This software is provided 'as-is', without any express or implied
17 warranty. In no event will the authors be held liable for any damages
18 arising from the use of this software.
19
20 Permission is granted to anyone to use this software for any purpose,
21 including commercial applications, and to alter it and redistribute it
22 freely, subject to the following restrictions:
23
24 1. The origin of this software must not be misrepresented; you must not
25 claim that you wrote the original software. If you use this software
26 in a product, an acknowledgment in the product documentation would be
27 appreciated but is not required.
28 2. Altered source versions must be plainly marked as such, and must not be
29 misrepresented as being the original software.
30 3. This notice may not be removed or altered from any source distribution.
31
32 Jean-loup Gailly Mark Adler
33 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
34
35 *
36 *
37 */
38
39/*+++++*/
40/* zutil.h -- internal interface and configuration of the compression library
41 * Copyright (C) 1995 Jean-loup Gailly.
42 * For conditions of distribution and use, see copyright notice in zlib.h
43 */
44
45/* WARNING: this file should *not* be used by applications. It is
46 part of the implementation of the compression library and is
47 subject to change. Applications should only use zlib.h.
48 */
49
50/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
51
52#define _Z_UTIL_H
53
54#include "zlib.h"
55
56#ifndef local
57# define local static
58#endif
59/* compile with -Dlocal if your debugger can't find static symbols */
60
61#define FAR
62
63typedef unsigned char uch;
64typedef uch FAR uchf;
65typedef unsigned short ush;
66typedef ush FAR ushf;
67typedef unsigned long ulg;
68
69extern char *z_errmsg[]; /* indexed by 1-zlib_error */
70
71#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
72/* To be used only when the state is known to be valid */
73
74#ifndef NULL
75#define NULL ((void *) 0)
76#endif
77
78 /* common constants */
79
80#define DEFLATED 8
81
82#ifndef DEF_WBITS
83# define DEF_WBITS MAX_WBITS
84#endif
85/* default windowBits for decompression. MAX_WBITS is for compression only */
86
87#if MAX_MEM_LEVEL >= 8
88# define DEF_MEM_LEVEL 8
89#else
90# define DEF_MEM_LEVEL MAX_MEM_LEVEL
91#endif
92/* default memLevel */
93
94#define STORED_BLOCK 0
95#define STATIC_TREES 1
96#define DYN_TREES 2
97/* The three kinds of block type */
98
99#define MIN_MATCH 3
100#define MAX_MATCH 258
101/* The minimum and maximum match lengths */
102
103 /* functions */
104
105extern void *memcpy(void *, const void *, unsigned long);
106#define zmemcpy memcpy
107
108/* Diagnostic functions */
109#ifdef DEBUG_ZLIB
110# include "stdio.h"
111# ifndef verbose
112# define verbose 0
113# endif
114# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
115# define Trace(x) fprintf x
116# define Tracev(x) {if (verbose) fprintf x ;}
117# define Tracevv(x) {if (verbose>1) fprintf x ;}
118# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
119# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
120#else
121# define Assert(cond,msg)
122# define Trace(x)
123# define Tracev(x)
124# define Tracevv(x)
125# define Tracec(c,x)
126# define Tracecv(c,x)
127#endif
128
129
130typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
131
132/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
133/* void zcfree OF((voidpf opaque, voidpf ptr)); */
134
135#define ZALLOC(strm, items, size) \
136 (*((strm)->zalloc))((strm)->opaque, (items), (size))
137#define ZFREE(strm, addr, size) \
138 (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
139#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
140
141/* deflate.h -- internal compression state
142 * Copyright (C) 1995 Jean-loup Gailly
143 * For conditions of distribution and use, see copyright notice in zlib.h
144 */
145
146/* WARNING: this file should *not* be used by applications. It is
147 part of the implementation of the compression library and is
148 subject to change. Applications should only use zlib.h.
149 */
150
151/*+++++*/
152/* infblock.h -- header to use infblock.c
153 * Copyright (C) 1995 Mark Adler
154 * For conditions of distribution and use, see copyright notice in zlib.h
155 */
156
157/* WARNING: this file should *not* be used by applications. It is
158 part of the implementation of the compression library and is
159 subject to change. Applications should only use zlib.h.
160 */
161
162struct inflate_blocks_state;
163typedef struct inflate_blocks_state FAR inflate_blocks_statef;
164
165local inflate_blocks_statef * inflate_blocks_new OF((
166 z_stream *z,
167 check_func c, /* check function */
168 uInt w)); /* window size */
169
170local int inflate_blocks OF((
171 inflate_blocks_statef *,
172 z_stream *,
173 int)); /* initial return code */
174
175local void inflate_blocks_reset OF((
176 inflate_blocks_statef *,
177 z_stream *,
178 uLongf *)); /* check value on output */
179
180local int inflate_blocks_free OF((
181 inflate_blocks_statef *,
182 z_stream *,
183 uLongf *)); /* check value on output */
184
185local int inflate_addhistory OF((
186 inflate_blocks_statef *,
187 z_stream *));
188
189local int inflate_packet_flush OF((
190 inflate_blocks_statef *));
191
192/*+++++*/
193/* inftrees.h -- header to use inftrees.c
194 * Copyright (C) 1995 Mark Adler
195 * For conditions of distribution and use, see copyright notice in zlib.h
196 */
197
198/* WARNING: this file should *not* be used by applications. It is
199 part of the implementation of the compression library and is
200 subject to change. Applications should only use zlib.h.
201 */
202
203/* Huffman code lookup table entry--this entry is four bytes for machines
204 that have 16-bit pointers (e.g. PC's in the small or medium model). */
205
206typedef struct inflate_huft_s FAR inflate_huft;
207
208struct inflate_huft_s {
209 union {
210 struct {
211 Byte Exop; /* number of extra bits or operation */
212 Byte Bits; /* number of bits in this code or subcode */
213 } what;
214 uInt Nalloc; /* number of these allocated here */
215 Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
216 } word; /* 16-bit, 8 bytes for 32-bit machines) */
217 union {
218 uInt Base; /* literal, length base, or distance base */
219 inflate_huft *Next; /* pointer to next level of table */
220 } more;
221};
222
223#ifdef DEBUG_ZLIB
224 local uInt inflate_hufts;
225#endif
226
227local int inflate_trees_bits OF((
228 uIntf *, /* 19 code lengths */
229 uIntf *, /* bits tree desired/actual depth */
230 inflate_huft * FAR *, /* bits tree result */
231 z_stream *)); /* for zalloc, zfree functions */
232
233local int inflate_trees_dynamic OF((
234 uInt, /* number of literal/length codes */
235 uInt, /* number of distance codes */
236 uIntf *, /* that many (total) code lengths */
237 uIntf *, /* literal desired/actual bit depth */
238 uIntf *, /* distance desired/actual bit depth */
239 inflate_huft * FAR *, /* literal/length tree result */
240 inflate_huft * FAR *, /* distance tree result */
241 z_stream *)); /* for zalloc, zfree functions */
242
243local int inflate_trees_fixed OF((
244 uIntf *, /* literal desired/actual bit depth */
245 uIntf *, /* distance desired/actual bit depth */
246 inflate_huft * FAR *, /* literal/length tree result */
247 inflate_huft * FAR *)); /* distance tree result */
248
249local int inflate_trees_free OF((
250 inflate_huft *, /* tables to free */
251 z_stream *)); /* for zfree function */
252
253
254/*+++++*/
255/* infcodes.h -- header to use infcodes.c
256 * Copyright (C) 1995 Mark Adler
257 * For conditions of distribution and use, see copyright notice in zlib.h
258 */
259
260/* WARNING: this file should *not* be used by applications. It is
261 part of the implementation of the compression library and is
262 subject to change. Applications should only use zlib.h.
263 */
264
265struct inflate_codes_state;
266typedef struct inflate_codes_state FAR inflate_codes_statef;
267
268local inflate_codes_statef *inflate_codes_new OF((
269 uInt, uInt,
270 inflate_huft *, inflate_huft *,
271 z_stream *));
272
273local int inflate_codes OF((
274 inflate_blocks_statef *,
275 z_stream *,
276 int));
277
278local void inflate_codes_free OF((
279 inflate_codes_statef *,
280 z_stream *));
281
282
283/*+++++*/
284/* inflate.c -- zlib interface to inflate modules
285 * Copyright (C) 1995 Mark Adler
286 * For conditions of distribution and use, see copyright notice in zlib.h
287 */
288
289/* inflate private state */
290struct internal_state {
291
292 /* mode */
293 enum {
294 METHOD, /* waiting for method byte */
295 FLAG, /* waiting for flag byte */
296 BLOCKS, /* decompressing blocks */
297 CHECK4, /* four check bytes to go */
298 CHECK3, /* three check bytes to go */
299 CHECK2, /* two check bytes to go */
300 CHECK1, /* one check byte to go */
301 DONE, /* finished check, done */
302 BAD} /* got an error--stay here */
303 mode; /* current inflate mode */
304
305 /* mode dependent information */
306 union {
307 uInt method; /* if FLAGS, method byte */
308 struct {
309 uLong was; /* computed check value */
310 uLong need; /* stream check value */
311 } check; /* if CHECK, check values to compare */
312 uInt marker; /* if BAD, inflateSync's marker bytes count */
313 } sub; /* submode */
314
315 /* mode independent information */
316 int nowrap; /* flag for no wrapper */
317 uInt wbits; /* log2(window size) (8..15, defaults to 15) */
318 inflate_blocks_statef
319 *blocks; /* current inflate_blocks state */
320
321};
322
323
324int inflateReset(
325 z_stream *z
326)
327{
328 uLong c;
329
330 if (z == Z_NULL || z->state == Z_NULL)
331 return Z_STREAM_ERROR;
332 z->total_in = z->total_out = 0;
333 z->msg = Z_NULL;
334 z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
335 inflate_blocks_reset(z->state->blocks, z, &c);
336 Trace((stderr, "inflate: reset\n"));
337 return Z_OK;
338}
339
340
341int inflateEnd(
342 z_stream *z
343)
344{
345 uLong c;
346
347 if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
348 return Z_STREAM_ERROR;
349 if (z->state->blocks != Z_NULL)
350 inflate_blocks_free(z->state->blocks, z, &c);
351 ZFREE(z, z->state, sizeof(struct internal_state));
352 z->state = Z_NULL;
353 Trace((stderr, "inflate: end\n"));
354 return Z_OK;
355}
356
357
358int inflateInit2(
359 z_stream *z,
360 int w
361)
362{
363 /* initialize state */
364 if (z == Z_NULL)
365 return Z_STREAM_ERROR;
366/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
367/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
368 if ((z->state = (struct internal_state FAR *)
369 ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
370 return Z_MEM_ERROR;
371 z->state->blocks = Z_NULL;
372
373 /* handle undocumented nowrap option (no zlib header or check) */
374 z->state->nowrap = 0;
375 if (w < 0)
376 {
377 w = - w;
378 z->state->nowrap = 1;
379 }
380
381 /* set window size */
382 if (w < 8 || w > 15)
383 {
384 inflateEnd(z);
385 return Z_STREAM_ERROR;
386 }
387 z->state->wbits = (uInt)w;
388
389 /* create inflate_blocks state */
390 if ((z->state->blocks =
391 inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
392 == Z_NULL)
393 {
394 inflateEnd(z);
395 return Z_MEM_ERROR;
396 }
397 Trace((stderr, "inflate: allocated\n"));
398
399 /* reset state */
400 inflateReset(z);
401 return Z_OK;
402}
403
404
405int inflateInit(
406 z_stream *z
407)
408{
409 return inflateInit2(z, DEF_WBITS);
410}
411
412
413#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
414#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
415
416int inflate(
417 z_stream *z,
418 int f
419)
420{
421 int r;
422 uInt b;
423
424 if (z == Z_NULL || z->next_in == Z_NULL)
425 return Z_STREAM_ERROR;
426 r = Z_BUF_ERROR;
427 while (1) switch (z->state->mode)
428 {
429 case METHOD:
430 NEEDBYTE
431 if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
432 {
433 z->state->mode = BAD;
434 z->msg = "unknown compression method";
435 z->state->sub.marker = 5; /* can't try inflateSync */
436 break;
437 }
438 if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
439 {
440 z->state->mode = BAD;
441 z->msg = "invalid window size";
442 z->state->sub.marker = 5; /* can't try inflateSync */
443 break;
444 }
445 z->state->mode = FLAG;
446 case FLAG:
447 NEEDBYTE
448 if ((b = NEXTBYTE) & 0x20)
449 {
450 z->state->mode = BAD;
451 z->msg = "invalid reserved bit";
452 z->state->sub.marker = 5; /* can't try inflateSync */
453 break;
454 }
455 if (((z->state->sub.method << 8) + b) % 31)
456 {
457 z->state->mode = BAD;
458 z->msg = "incorrect header check";
459 z->state->sub.marker = 5; /* can't try inflateSync */
460 break;
461 }
462 Trace((stderr, "inflate: zlib header ok\n"));
463 z->state->mode = BLOCKS;
464 case BLOCKS:
465 r = inflate_blocks(z->state->blocks, z, r);
466 if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
467 r = inflate_packet_flush(z->state->blocks);
468 if (r == Z_DATA_ERROR)
469 {
470 z->state->mode = BAD;
471 z->state->sub.marker = 0; /* can try inflateSync */
472 break;
473 }
474 if (r != Z_STREAM_END)
475 return r;
476 r = Z_OK;
477 inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
478 if (z->state->nowrap)
479 {
480 z->state->mode = DONE;
481 break;
482 }
483 z->state->mode = CHECK4;
484 case CHECK4:
485 NEEDBYTE
486 z->state->sub.check.need = (uLong)NEXTBYTE << 24;
487 z->state->mode = CHECK3;
488 case CHECK3:
489 NEEDBYTE
490 z->state->sub.check.need += (uLong)NEXTBYTE << 16;
491 z->state->mode = CHECK2;
492 case CHECK2:
493 NEEDBYTE
494 z->state->sub.check.need += (uLong)NEXTBYTE << 8;
495 z->state->mode = CHECK1;
496 case CHECK1:
497 NEEDBYTE
498 z->state->sub.check.need += (uLong)NEXTBYTE;
499
500 if (z->state->sub.check.was != z->state->sub.check.need)
501 {
502 z->state->mode = BAD;
503 z->msg = "incorrect data check";
504 z->state->sub.marker = 5; /* can't try inflateSync */
505 break;
506 }
507 Trace((stderr, "inflate: zlib check ok\n"));
508 z->state->mode = DONE;
509 case DONE:
510 return Z_STREAM_END;
511 case BAD:
512 return Z_DATA_ERROR;
513 default:
514 return Z_STREAM_ERROR;
515 }
516
517 empty:
518 if (f != Z_PACKET_FLUSH)
519 return r;
520 z->state->mode = BAD;
521 z->state->sub.marker = 0; /* can try inflateSync */
522 return Z_DATA_ERROR;
523}
524
525/*
526 * This subroutine adds the data at next_in/avail_in to the output history
527 * without performing any output. The output buffer must be "caught up";
528 * i.e. no pending output (hence s->read equals s->write), and the state must
529 * be BLOCKS (i.e. we should be willing to see the start of a series of
530 * BLOCKS). On exit, the output will also be caught up, and the checksum
531 * will have been updated if need be.
532 */
533
534int inflateIncomp(
535 z_stream *z
536)
537{
538 if (z->state->mode != BLOCKS)
539 return Z_DATA_ERROR;
540 return inflate_addhistory(z->state->blocks, z);
541}
542
543
544int inflateSync(
545 z_stream *z
546)
547{
548 uInt n; /* number of bytes to look at */
549 Bytef *p; /* pointer to bytes */
550 uInt m; /* number of marker bytes found in a row */
551 uLong r, w; /* temporaries to save total_in and total_out */
552
553 /* set up */
554 if (z == Z_NULL || z->state == Z_NULL)
555 return Z_STREAM_ERROR;
556 if (z->state->mode != BAD)
557 {
558 z->state->mode = BAD;
559 z->state->sub.marker = 0;
560 }
561 if ((n = z->avail_in) == 0)
562 return Z_BUF_ERROR;
563 p = z->next_in;
564 m = z->state->sub.marker;
565
566 /* search */
567 while (n && m < 4)
568 {
569 if (*p == (Byte)(m < 2 ? 0 : 0xff))
570 m++;
571 else if (*p)
572 m = 0;
573 else
574 m = 4 - m;
575 p++, n--;
576 }
577
578 /* restore */
579 z->total_in += p - z->next_in;
580 z->next_in = p;
581 z->avail_in = n;
582 z->state->sub.marker = m;
583
584 /* return no joy or set up to restart on a new block */
585 if (m != 4)
586 return Z_DATA_ERROR;
587 r = z->total_in; w = z->total_out;
588 inflateReset(z);
589 z->total_in = r; z->total_out = w;
590 z->state->mode = BLOCKS;
591 return Z_OK;
592}
593
594#undef NEEDBYTE
595#undef NEXTBYTE
596
597/*+++++*/
598/* infutil.h -- types and macros common to blocks and codes
599 * Copyright (C) 1995 Mark Adler
600 * For conditions of distribution and use, see copyright notice in zlib.h
601 */
602
603/* WARNING: this file should *not* be used by applications. It is
604 part of the implementation of the compression library and is
605 subject to change. Applications should only use zlib.h.
606 */
607
608/* inflate blocks semi-private state */
609struct inflate_blocks_state {
610
611 /* mode */
612 enum {
613 TYPE, /* get type bits (3, including end bit) */
614 LENS, /* get lengths for stored */
615 STORED, /* processing stored block */
616 TABLE, /* get table lengths */
617 BTREE, /* get bit lengths tree for a dynamic block */
618 DTREE, /* get length, distance trees for a dynamic block */
619 CODES, /* processing fixed or dynamic block */
620 DRY, /* output remaining window bytes */
621 DONEB, /* finished last block, done */
622 BADB} /* got a data error--stuck here */
623 mode; /* current inflate_block mode */
624
625 /* mode dependent information */
626 union {
627 uInt left; /* if STORED, bytes left to copy */
628 struct {
629 uInt table; /* table lengths (14 bits) */
630 uInt index; /* index into blens (or border) */
631 uIntf *blens; /* bit lengths of codes */
632 uInt bb; /* bit length tree depth */
633 inflate_huft *tb; /* bit length decoding tree */
634 int nblens; /* # elements allocated at blens */
635 } trees; /* if DTREE, decoding info for trees */
636 struct {
637 inflate_huft *tl, *td; /* trees to free */
638 inflate_codes_statef
639 *codes;
640 } decode; /* if CODES, current state */
641 } sub; /* submode */
642 uInt last; /* true if this block is the last block */
643
644 /* mode independent information */
645 uInt bitk; /* bits in bit buffer */
646 uLong bitb; /* bit buffer */
647 Bytef *window; /* sliding window */
648 Bytef *end; /* one byte after sliding window */
649 Bytef *read; /* window read pointer */
650 Bytef *write; /* window write pointer */
651 check_func checkfn; /* check function */
652 uLong check; /* check on output */
653
654};
655
656
657/* defines for inflate input/output */
658/* update pointers and return */
659#define UPDBITS {s->bitb=b;s->bitk=k;}
660#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
661#define UPDOUT {s->write=q;}
662#define UPDATE {UPDBITS UPDIN UPDOUT}
663#define LEAVE {UPDATE return inflate_flush(s,z,r);}
664/* get bytes and bits */
665#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
666#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
667#define NEXTBYTE (n--,*p++)
668#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
669#define DUMPBITS(j) {b>>=(j);k-=(j);}
670/* output bytes */
671#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
672#define LOADOUT {q=s->write;m=WAVAIL;}
673#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
674#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
675#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
676#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
677/* load local pointers */
678#define LOAD {LOADIN LOADOUT}
679
680/* And'ing with mask[n] masks the lower n bits */
681local uInt inflate_mask[] = {
682 0x0000,
683 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
684 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
685};
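
/*
 * Editor's sketch (not original source): the NEEDBITS/DUMPBITS machinery
 * above, written out as plain functions. Bits are accumulated least-
 * significant-bit first, so the next j bits of the stream are simply
 * (b & inflate_mask[j]).
 */
#if 0 /* illustrative sketch only */
struct bitbuf {
    uLong b;                /* bit buffer, LSB = oldest unconsumed bit */
    uInt k;                 /* number of valid bits in b */
    const Bytef *p;         /* next input byte */
    uInt n;                 /* input bytes remaining */
};

static int need_bits(struct bitbuf *s, uInt j)  /* cf. NEEDBITS(j) */
{
    while (s->k < j) {
        if (s->n == 0)
            return 0;       /* starved for input; the macro LEAVEs here */
        s->b |= (uLong)(*s->p++) << s->k;
        s->n--;
        s->k += 8;
    }
    return 1;
}

static uInt get_bits(struct bitbuf *s, uInt j)  /* peek, then DUMPBITS(j) */
{
    uInt v = (uInt)s->b & inflate_mask[j];
    s->b >>= j;
    s->k -= j;
    return v;
}
#endif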
686
687/* copy as much as possible from the sliding window to the output area */
688local int inflate_flush OF((
689 inflate_blocks_statef *,
690 z_stream *,
691 int));
692
693/*+++++*/
694/* inffast.h -- header to use inffast.c
695 * Copyright (C) 1995 Mark Adler
696 * For conditions of distribution and use, see copyright notice in zlib.h
697 */
698
699/* WARNING: this file should *not* be used by applications. It is
700 part of the implementation of the compression library and is
701 subject to change. Applications should only use zlib.h.
702 */
703
704local int inflate_fast OF((
705 uInt,
706 uInt,
707 inflate_huft *,
708 inflate_huft *,
709 inflate_blocks_statef *,
710 z_stream *));
711
712
713/*+++++*/
714/* infblock.c -- interpret and process block types to last block
715 * Copyright (C) 1995 Mark Adler
716 * For conditions of distribution and use, see copyright notice in zlib.h
717 */
718
719/* Table for deflate from PKZIP's appnote.txt. */
720local uInt border[] = { /* Order of the bit length code lengths */
721 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
722
723/*
724 Notes beyond the 1.93a appnote.txt:
725
726 1. Distance pointers never point before the beginning of the output
727 stream.
728 2. Distance pointers can point back across blocks, up to 32k away.
729 3. There is an implied maximum of 7 bits for the bit length table and
730 15 bits for the actual data.
731 4. If only one code exists, then it is encoded using one bit. (Zero
732 would be more efficient, but perhaps a little confusing.) If two
733 codes exist, they are coded using one bit each (0 and 1).
734 5. There is no way of sending zero distance codes--a dummy must be
735 sent if there are none. (History: a pre 2.0 version of PKZIP would
736 store blocks with no distance codes, but this was discovered to be
737 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
 738     zero distance codes, which are sent as one code of zero bits in
739 length.
740 6. There are up to 286 literal/length codes. Code 256 represents the
741 end-of-block. Note however that the static length tree defines
742 288 codes just to fill out the Huffman codes. Codes 286 and 287
743 cannot be used though, since there is no length base or extra bits
 744     defined for them. Similarly, there are up to 30 distance codes.
745 However, static trees define 32 codes (all 5 bits) to fill out the
746 Huffman codes, but the last two had better not show up in the data.
747 7. Unzip can check dynamic Huffman blocks for complete code sets.
748 The exception is that a single code would not be complete (see #4).
 749  8. The five bits following the block type are really the number of
750 literal codes sent minus 257.
751 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
752 (1+6+6). Therefore, to output three times the length, you output
753 three codes (1+1+1), whereas to output four times the same length,
754 you only need two codes (1+3). Hmm.
755 10. In the tree reconstruction algorithm, Code = Code + Increment
756 only if BitLength(i) is not zero. (Pretty obvious.)
757 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
758 12. Note: length code 284 can represent 227-258, but length code 285
759 really is 258. The last length deserves its own, short code
760 since it gets used a lot in very redundant files. The length
761 258 is special since 258 - 3 (the min match length) is 255.
762 13. The literal/length and distance code bit lengths are read as a
763 single stream of lengths. It is possible (and advantageous) for
764 a repeat code (16, 17, or 18) to go across the boundary between
765 the two sets of lengths.
766 */
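
/*
 * Editor's sketch: the 3-bit block header that the TYPE state below
 * consumes. Bit 0 is the "last block" flag; bits 1-2 select the block
 * type, exactly as in the switch on (t >> 1) in inflate_blocks():
 */
#if 0 /* illustrative sketch only */
static void decode_block_header(uInt t)     /* t = low 3 bits of stream */
{
    uInt last = t & 1;                      /* nonzero on the final block */

    switch (t >> 1) {
    case 0: /* stored: skip to byte boundary, LEN/NLEN follow */ break;
    case 1: /* fixed Huffman codes (the static trees) */ break;
    case 2: /* dynamic Huffman codes: code length tables follow */ break;
    case 3: /* reserved--invalid block type */ break;
    }
    (void)last;
}
#endif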
767
768
769local void inflate_blocks_reset(
770 inflate_blocks_statef *s,
771 z_stream *z,
772 uLongf *c
773)
774{
775 if (s->checkfn != Z_NULL)
776 *c = s->check;
777 if (s->mode == BTREE || s->mode == DTREE)
778 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
779 if (s->mode == CODES)
780 {
781 inflate_codes_free(s->sub.decode.codes, z);
782 inflate_trees_free(s->sub.decode.td, z);
783 inflate_trees_free(s->sub.decode.tl, z);
784 }
785 s->mode = TYPE;
786 s->bitk = 0;
787 s->bitb = 0;
788 s->read = s->write = s->window;
789 if (s->checkfn != Z_NULL)
790 s->check = (*s->checkfn)(0L, Z_NULL, 0);
791 Trace((stderr, "inflate: blocks reset\n"));
792}
793
794
795local inflate_blocks_statef *inflate_blocks_new(
796 z_stream *z,
797 check_func c,
798 uInt w
799)
800{
801 inflate_blocks_statef *s;
802
803 if ((s = (inflate_blocks_statef *)ZALLOC
804 (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
805 return s;
806 if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
807 {
808 ZFREE(z, s, sizeof(struct inflate_blocks_state));
809 return Z_NULL;
810 }
811 s->end = s->window + w;
812 s->checkfn = c;
813 s->mode = TYPE;
814 Trace((stderr, "inflate: blocks allocated\n"));
815 inflate_blocks_reset(s, z, &s->check);
816 return s;
817}
818
819
820local int inflate_blocks(
821 inflate_blocks_statef *s,
822 z_stream *z,
823 int r
824)
825{
826 uInt t; /* temporary storage */
827 uLong b; /* bit buffer */
828 uInt k; /* bits in bit buffer */
829 Bytef *p; /* input data pointer */
830 uInt n; /* bytes available there */
831 Bytef *q; /* output window write pointer */
832 uInt m; /* bytes to end of window or read pointer */
833
834 /* copy input/output information to locals (UPDATE macro restores) */
835 LOAD
836
837 /* process input based on current state */
838 while (1) switch (s->mode)
839 {
840 case TYPE:
841 NEEDBITS(3)
842 t = (uInt)b & 7;
843 s->last = t & 1;
844 switch (t >> 1)
845 {
846 case 0: /* stored */
847 Trace((stderr, "inflate: stored block%s\n",
848 s->last ? " (last)" : ""));
849 DUMPBITS(3)
850 t = k & 7; /* go to byte boundary */
851 DUMPBITS(t)
852 s->mode = LENS; /* get length of stored block */
853 break;
854 case 1: /* fixed */
855 Trace((stderr, "inflate: fixed codes block%s\n",
856 s->last ? " (last)" : ""));
857 {
858 uInt bl, bd;
859 inflate_huft *tl, *td;
860
861 inflate_trees_fixed(&bl, &bd, &tl, &td);
862 s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
863 if (s->sub.decode.codes == Z_NULL)
864 {
865 r = Z_MEM_ERROR;
866 LEAVE
867 }
868 s->sub.decode.tl = Z_NULL; /* don't try to free these */
869 s->sub.decode.td = Z_NULL;
870 }
871 DUMPBITS(3)
872 s->mode = CODES;
873 break;
874 case 2: /* dynamic */
875 Trace((stderr, "inflate: dynamic codes block%s\n",
876 s->last ? " (last)" : ""));
877 DUMPBITS(3)
878 s->mode = TABLE;
879 break;
880 case 3: /* illegal */
881 DUMPBITS(3)
882 s->mode = BADB;
883 z->msg = "invalid block type";
884 r = Z_DATA_ERROR;
885 LEAVE
886 }
887 break;
888 case LENS:
889 NEEDBITS(32)
890 if (((~b) >> 16) != (b & 0xffff))
891 {
892 s->mode = BADB;
893 z->msg = "invalid stored block lengths";
894 r = Z_DATA_ERROR;
895 LEAVE
896 }
897 s->sub.left = (uInt)b & 0xffff;
898 b = k = 0; /* dump bits */
899 Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
900 s->mode = s->sub.left ? STORED : TYPE;
901 break;
902 case STORED:
903 if (n == 0)
904 LEAVE
905 NEEDOUT
906 t = s->sub.left;
907 if (t > n) t = n;
908 if (t > m) t = m;
909 zmemcpy(q, p, t);
910 p += t; n -= t;
911 q += t; m -= t;
912 if ((s->sub.left -= t) != 0)
913 break;
914 Tracev((stderr, "inflate: stored end, %lu total out\n",
915 z->total_out + (q >= s->read ? q - s->read :
916 (s->end - s->read) + (q - s->window))));
917 s->mode = s->last ? DRY : TYPE;
918 break;
919 case TABLE:
920 NEEDBITS(14)
921 s->sub.trees.table = t = (uInt)b & 0x3fff;
922#ifndef PKZIP_BUG_WORKAROUND
923 if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
924 {
925 s->mode = BADB;
926 z->msg = "too many length or distance symbols";
927 r = Z_DATA_ERROR;
928 LEAVE
929 }
930#endif
931 t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
932 if (t < 19)
933 t = 19;
934 if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
935 {
936 r = Z_MEM_ERROR;
937 LEAVE
938 }
939 s->sub.trees.nblens = t;
940 DUMPBITS(14)
941 s->sub.trees.index = 0;
942 Tracev((stderr, "inflate: table sizes ok\n"));
943 s->mode = BTREE;
944 case BTREE:
945 while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
946 {
947 NEEDBITS(3)
948 s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
949 DUMPBITS(3)
950 }
951 while (s->sub.trees.index < 19)
952 s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
953 s->sub.trees.bb = 7;
954 t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
955 &s->sub.trees.tb, z);
956 if (t != Z_OK)
957 {
958 r = t;
959 if (r == Z_DATA_ERROR)
960 s->mode = BADB;
961 LEAVE
962 }
963 s->sub.trees.index = 0;
964 Tracev((stderr, "inflate: bits tree ok\n"));
965 s->mode = DTREE;
966 case DTREE:
967 while (t = s->sub.trees.table,
968 s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
969 {
970 inflate_huft *h;
971 uInt i, j, c;
972
973 t = s->sub.trees.bb;
974 NEEDBITS(t)
975 h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
976 t = h->word.what.Bits;
977 c = h->more.Base;
978 if (c < 16)
979 {
980 DUMPBITS(t)
981 s->sub.trees.blens[s->sub.trees.index++] = c;
982 }
983 else /* c == 16..18 */
984 {
985 i = c == 18 ? 7 : c - 14;
986 j = c == 18 ? 11 : 3;
987 NEEDBITS(t + i)
988 DUMPBITS(t)
989 j += (uInt)b & inflate_mask[i];
990 DUMPBITS(i)
991 i = s->sub.trees.index;
992 t = s->sub.trees.table;
993 if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
994 (c == 16 && i < 1))
995 {
996 s->mode = BADB;
997 z->msg = "invalid bit length repeat";
998 r = Z_DATA_ERROR;
999 LEAVE
1000 }
1001 c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
1002 do {
1003 s->sub.trees.blens[i++] = c;
1004 } while (--j);
1005 s->sub.trees.index = i;
1006 }
1007 }
1008 inflate_trees_free(s->sub.trees.tb, z);
1009 s->sub.trees.tb = Z_NULL;
1010 {
1011 uInt bl, bd;
1012 inflate_huft *tl, *td;
1013 inflate_codes_statef *c;
1014
1015 bl = 9; /* must be <= 9 for lookahead assumptions */
1016 bd = 6; /* must be <= 15 for lookahead assumptions */
1017 t = s->sub.trees.table;
1018 t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
1019 s->sub.trees.blens, &bl, &bd, &tl, &td, z);
1020 if (t != Z_OK)
1021 {
1022 if (t == (uInt)Z_DATA_ERROR)
1023 s->mode = BADB;
1024 r = t;
1025 LEAVE
1026 }
1027 Tracev((stderr, "inflate: trees ok\n"));
1028 if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
1029 {
1030 inflate_trees_free(td, z);
1031 inflate_trees_free(tl, z);
1032 r = Z_MEM_ERROR;
1033 LEAVE
1034 }
1035 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
1036 s->sub.decode.codes = c;
1037 s->sub.decode.tl = tl;
1038 s->sub.decode.td = td;
1039 }
1040 s->mode = CODES;
1041 case CODES:
1042 UPDATE
1043 if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
1044 return inflate_flush(s, z, r);
1045 r = Z_OK;
1046 inflate_codes_free(s->sub.decode.codes, z);
1047 inflate_trees_free(s->sub.decode.td, z);
1048 inflate_trees_free(s->sub.decode.tl, z);
1049 LOAD
1050 Tracev((stderr, "inflate: codes end, %lu total out\n",
1051 z->total_out + (q >= s->read ? q - s->read :
1052 (s->end - s->read) + (q - s->window))));
1053 if (!s->last)
1054 {
1055 s->mode = TYPE;
1056 break;
1057 }
1058 if (k > 7) /* return unused byte, if any */
1059 {
1060 Assert(k < 16, "inflate_codes grabbed too many bytes")
1061 k -= 8;
1062 n++;
1063 p--; /* can always return one */
1064 }
1065 s->mode = DRY;
1066 case DRY:
1067 FLUSH
1068 if (s->read != s->write)
1069 LEAVE
1070 s->mode = DONEB;
1071 case DONEB:
1072 r = Z_STREAM_END;
1073 LEAVE
1074 case BADB:
1075 r = Z_DATA_ERROR;
1076 LEAVE
1077 default:
1078 r = Z_STREAM_ERROR;
1079 LEAVE
1080 }
1081}
1082
1083
1084local int inflate_blocks_free(
1085 inflate_blocks_statef *s,
1086 z_stream *z,
1087 uLongf *c
1088)
1089{
1090 inflate_blocks_reset(s, z, c);
1091 ZFREE(z, s->window, s->end - s->window);
1092 ZFREE(z, s, sizeof(struct inflate_blocks_state));
1093 Trace((stderr, "inflate: blocks freed\n"));
1094 return Z_OK;
1095}
1096
1097/*
1098 * This subroutine adds the data at next_in/avail_in to the output history
1099 * without performing any output. The output buffer must be "caught up";
1100 * i.e. no pending output (hence s->read equals s->write), and the state must
1101 * be BLOCKS (i.e. we should be willing to see the start of a series of
1102 * BLOCKS). On exit, the output will also be caught up, and the checksum
1103 * will have been updated if need be.
1104 */
1105local int inflate_addhistory(
1106 inflate_blocks_statef *s,
1107 z_stream *z
1108)
1109{
1110 uLong b; /* bit buffer */ /* NOT USED HERE */
1111 uInt k; /* bits in bit buffer */ /* NOT USED HERE */
1112 uInt t; /* temporary storage */
1113 Bytef *p; /* input data pointer */
1114 uInt n; /* bytes available there */
1115 Bytef *q; /* output window write pointer */
1116 uInt m; /* bytes to end of window or read pointer */
1117
1118 if (s->read != s->write)
1119 return Z_STREAM_ERROR;
1120 if (s->mode != TYPE)
1121 return Z_DATA_ERROR;
1122
1123 /* we're ready to rock */
1124 LOAD
1125 /* while there is input ready, copy to output buffer, moving
1126 * pointers as needed.
1127 */
1128 while (n) {
1129 t = n; /* how many to do */
1130 /* is there room until end of buffer? */
1131 if (t > m) t = m;
1132 /* update check information */
1133 if (s->checkfn != Z_NULL)
1134 s->check = (*s->checkfn)(s->check, q, t);
1135 zmemcpy(q, p, t);
1136 q += t;
1137 p += t;
1138 n -= t;
1139 z->total_out += t;
1140 s->read = q; /* drag read pointer forward */
1141/* WRAP */ /* expand WRAP macro by hand to handle s->read */
1142 if (q == s->end) {
1143 s->read = q = s->window;
1144 m = WAVAIL;
1145 }
1146 }
1147 UPDATE
1148 return Z_OK;
1149}
1150
1151
1152/*
1153 * At the end of a Deflate-compressed PPP packet, we expect to have seen
1154 * a `stored' block type value but not the (zero) length bytes.
1155 */
1156local int inflate_packet_flush(
1157 inflate_blocks_statef *s
1158)
1159{
1160 if (s->mode != LENS)
1161 return Z_DATA_ERROR;
1162 s->mode = TYPE;
1163 return Z_OK;
1164}
1165
1166
1167/*+++++*/
1168/* inftrees.c -- generate Huffman trees for efficient decoding
1169 * Copyright (C) 1995 Mark Adler
1170 * For conditions of distribution and use, see copyright notice in zlib.h
1171 */
1172
1173/* simplify the use of the inflate_huft type with some defines */
1174#define base more.Base
1175#define next more.Next
1176#define exop word.what.Exop
1177#define bits word.what.Bits
1178
1179
1180local int huft_build OF((
1181 uIntf *, /* code lengths in bits */
1182 uInt, /* number of codes */
1183 uInt, /* number of "simple" codes */
1184 uIntf *, /* list of base values for non-simple codes */
1185 uIntf *, /* list of extra bits for non-simple codes */
1186 inflate_huft * FAR*,/* result: starting table */
1187 uIntf *, /* maximum lookup bits (returns actual) */
1188 z_stream *)); /* for zalloc function */
1189
1190local voidpf falloc OF((
1191 voidpf, /* opaque pointer (not used) */
1192 uInt, /* number of items */
1193 uInt)); /* size of item */
1194
1195local void ffree OF((
1196 voidpf q, /* opaque pointer (not used) */
1197 voidpf p, /* what to free (not used) */
1198 uInt n)); /* number of bytes (not used) */
1199
1200/* Tables for deflate from PKZIP's appnote.txt. */
1201local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
1202 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
1203 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
1204 /* actually lengths - 2; also see note #13 above about 258 */
1205local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
1206 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
1207 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
1208local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
1209 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
1210 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
1211 8193, 12289, 16385, 24577};
1212local uInt cpdext[] = { /* Extra bits for distance codes */
1213 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
1214 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
1215 12, 12, 13, 13};
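
/*
 * Editor's worked example: decoding literal/length code 265 with the
 * tables above gives index 265 - 257 = 8, so the base length is
 * cplens[8] = 11 and cplext[8] = 1 extra bit selects length 11 or 12.
 * A hedged helper (valid for codes 257..285; 286 and 287 are marked
 * invalid by the 192 entries):
 */
#if 0 /* illustrative sketch only */
static uInt decode_length(uInt code, uInt extra)
{
    uInt i = code - 257;        /* index into cplens[]/cplext[] */

    return cplens[i] + (extra & inflate_mask[cplext[i]]);
}
#endif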
1216
1217/*
1218 Huffman code decoding is performed using a multi-level table lookup.
1219 The fastest way to decode is to simply build a lookup table whose
1220 size is determined by the longest code. However, the time it takes
1221 to build this table can also be a factor if the data being decoded
1222 is not very long. The most common codes are necessarily the
1223 shortest codes, so those codes dominate the decoding time, and hence
1224 the speed. The idea is you can have a shorter table that decodes the
1225 shorter, more probable codes, and then point to subsidiary tables for
1226 the longer codes. The time it costs to decode the longer codes is
1227 then traded against the time it takes to make longer tables.
1228
 1229 The results of this trade-off are in the variables lbits and dbits
1230 below. lbits is the number of bits the first level table for literal/
1231 length codes can decode in one step, and dbits is the same thing for
1232 the distance codes. Subsequent tables are also less than or equal to
1233 those sizes. These values may be adjusted either when all of the
1234 codes are shorter than that, in which case the longest code length in
1235 bits is used, or when the shortest code is *longer* than the requested
1236 table size, in which case the length of the shortest code in bits is
1237 used.
1238
1239 There are two different values for the two tables, since they code a
1240 different number of possibilities each. The literal/length table
1241 codes 286 possible values, or in a flat code, a little over eight
1242 bits. The distance table codes 30 possible values, or a little less
1243 than five bits, flat. The optimum values for speed end up being
1244 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
1245 The optimum values may differ though from machine to machine, and
1246 possibly even between compilers. Your mileage may vary.
1247 */
1248
1249
1250/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
1251#define BMAX 15 /* maximum bit length of any code */
1252#define N_MAX 288 /* maximum number of codes in any set */
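
/*
 * Editor's sketch of the first steps huft_build() performs below: count
 * the codes of each bit length, then turn the counts into starting
 * offsets so that values can be listed in canonical (bit-length) order.
 */
#if 0 /* illustrative sketch only */
static void canonical_offsets(const uInt *len, uInt n,
                              uInt c[BMAX + 1], uInt x[BMAX + 1])
{
    uInt i, j;

    for (j = 0; j <= BMAX; j++)
        c[j] = 0;
    for (i = 0; i < n; i++)
        c[len[i]]++;            /* all lengths assumed <= BMAX */
    x[0] = x[1] = 0;
    for (j = 1; j < BMAX; j++)
        x[j + 1] = x[j] + c[j]; /* running sum, as in huft_build */
    /* now v[x[len[i]]++] = i lists the values in bit-length order */
}
#endif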
1253
1254#ifdef DEBUG_ZLIB
1255 uInt inflate_hufts;
1256#endif
1257
1258local int huft_build(
1259 uIntf *b, /* code lengths in bits (all assumed <= BMAX) */
1260 uInt n, /* number of codes (assumed <= N_MAX) */
1261 uInt s, /* number of simple-valued codes (0..s-1) */
1262 uIntf *d, /* list of base values for non-simple codes */
1263 uIntf *e, /* list of extra bits for non-simple codes */
1264 inflate_huft * FAR *t, /* result: starting table */
1265 uIntf *m, /* maximum lookup bits, returns actual */
1266 z_stream *zs /* for zalloc function */
1267)
1268/* Given a list of code lengths and a maximum table size, make a set of
1269 tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
1270 if the given code set is incomplete (the tables are still built in this
1271 case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
1272 over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
1273{
1274
1275 uInt a; /* counter for codes of length k */
1276 uInt c[BMAX+1]; /* bit length count table */
1277 uInt f; /* i repeats in table every f entries */
1278 int g; /* maximum code length */
1279 int h; /* table level */
1280 register uInt i; /* counter, current code */
1281 register uInt j; /* counter */
1282 register int k; /* number of bits in current code */
1283 int l; /* bits per table (returned in m) */
1284 register uIntf *p; /* pointer into c[], b[], or v[] */
1285 inflate_huft *q; /* points to current table */
1286 struct inflate_huft_s r; /* table entry for structure assignment */
1287 inflate_huft *u[BMAX]; /* table stack */
1288 uInt v[N_MAX]; /* values in order of bit length */
1289 register int w; /* bits before this table == (l * h) */
1290 uInt x[BMAX+1]; /* bit offsets, then code stack */
1291 uIntf *xp; /* pointer into x */
1292 int y; /* number of dummy codes added */
1293 uInt z; /* number of entries in current table */
1294
1295
1296 /* Generate counts for each bit length */
1297 p = c;
1298#define C0 *p++ = 0;
1299#define C2 C0 C0 C0 C0
1300#define C4 C2 C2 C2 C2
1301 C4 /* clear c[]--assume BMAX+1 is 16 */
1302 p = b; i = n;
1303 do {
1304 c[*p++]++; /* assume all entries <= BMAX */
1305 } while (--i);
1306 if (c[0] == n) /* null input--all zero length codes */
1307 {
1308 *t = (inflate_huft *)Z_NULL;
1309 *m = 0;
1310 return Z_DATA_ERROR;
1311 }
1312
1313
1314 /* Find minimum and maximum length, bound *m by those */
1315 l = *m;
1316 for (j = 1; j <= BMAX; j++)
1317 if (c[j])
1318 break;
1319 k = j; /* minimum code length */
1320 if ((uInt)l < j)
1321 l = j;
1322 for (i = BMAX; i; i--)
1323 if (c[i])
1324 break;
1325 g = i; /* maximum code length */
1326 if ((uInt)l > i)
1327 l = i;
1328 *m = l;
1329
1330
1331 /* Adjust last length count to fill out codes, if needed */
1332 for (y = 1 << j; j < i; j++, y <<= 1)
1333 if ((y -= c[j]) < 0)
1334 return Z_DATA_ERROR;
1335 if ((y -= c[i]) < 0)
1336 return Z_DATA_ERROR;
1337 c[i] += y;
1338
1339
1340 /* Generate starting offsets into the value table for each length */
1341 x[1] = j = 0;
1342 p = c + 1; xp = x + 2;
1343 while (--i) { /* note that i == g from above */
1344 *xp++ = (j += *p++);
1345 }
1346
1347
1348 /* Make a table of values in order of bit lengths */
1349 p = b; i = 0;
1350 do {
1351 if ((j = *p++) != 0)
1352 v[x[j]++] = i;
1353 } while (++i < n);
1354 n = x[g]; /* set n to length of v */
1355
1356
1357 /* Generate the Huffman codes and for each, make the table entries */
1358 x[0] = i = 0; /* first Huffman code is zero */
1359 p = v; /* grab values in bit order */
1360 h = -1; /* no tables yet--level -1 */
1361 w = -l; /* bits decoded == (l * h) */
1362 u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
1363 q = (inflate_huft *)Z_NULL; /* ditto */
1364 z = 0; /* ditto */
1365
1366 /* go through the bit lengths (k already is bits in shortest code) */
1367 for (; k <= g; k++)
1368 {
1369 a = c[k];
1370 while (a--)
1371 {
1372 /* here i is the Huffman code of length k bits for value *p */
1373 /* make tables up to required level */
1374 while (k > w + l)
1375 {
1376 h++;
1377 w += l; /* previous table always l bits */
1378
1379 /* compute minimum size table less than or equal to l bits */
1380 z = (z = g - w) > (uInt)l ? l : z; /* table size upper limit */
1381 if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
1382 { /* too few codes for k-w bit table */
1383 f -= a + 1; /* deduct codes from patterns left */
1384 xp = c + k;
1385 if (j < z)
1386 while (++j < z) /* try smaller tables up to z bits */
1387 {
1388 if ((f <<= 1) <= *++xp)
1389 break; /* enough codes to use up j bits */
1390 f -= *xp; /* else deduct codes from patterns */
1391 }
1392 }
1393 z = 1 << j; /* table entries for j-bit table */
1394
1395 /* allocate and link in new table */
1396 if ((q = (inflate_huft *)ZALLOC
1397 (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
1398 {
1399 if (h)
1400 inflate_trees_free(u[0], zs);
1401 return Z_MEM_ERROR; /* not enough memory */
1402 }
1403 q->word.Nalloc = z + 1;
1404#ifdef DEBUG_ZLIB
1405 inflate_hufts += z + 1;
1406#endif
1407 *t = q + 1; /* link to list for huft_free() */
1408 *(t = &(q->next)) = Z_NULL;
1409 u[h] = ++q; /* table starts after link */
1410
1411 /* connect to last table, if there is one */
1412 if (h)
1413 {
1414 x[h] = i; /* save pattern for backing up */
1415 r.bits = (Byte)l; /* bits to dump before this table */
1416 r.exop = (Byte)j; /* bits in this table */
1417 r.next = q; /* pointer to this table */
1418 j = i >> (w - l); /* (get around Turbo C bug) */
1419 u[h-1][j] = r; /* connect to last table */
1420 }
1421 }
1422
1423 /* set up table entry in r */
1424 r.bits = (Byte)(k - w);
1425 if (p >= v + n)
1426 r.exop = 128 + 64; /* out of values--invalid code */
1427 else if (*p < s)
1428 {
1429 r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
1430 r.base = *p++; /* simple code is just the value */
1431 }
1432 else
1433 {
1434 r.exop = (Byte)e[*p - s] + 16 + 64; /* non-simple--look up in lists */
1435 r.base = d[*p++ - s];
1436 }
1437
1438 /* fill code-like entries with r */
1439 f = 1 << (k - w);
1440 for (j = i >> w; j < z; j += f)
1441 q[j] = r;
1442
1443 /* backwards increment the k-bit code i */
1444 for (j = 1 << (k - 1); i & j; j >>= 1)
1445 i ^= j;
1446 i ^= j;
1447
1448 /* backup over finished tables */
1449 while ((i & ((1 << w) - 1)) != x[h])
1450 {
1451 h--; /* don't need to update q */
1452 w -= l;
1453 }
1454 }
1455 }
1456
1457
1458 /* Return Z_BUF_ERROR if we were given an incomplete table */
1459 return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
1460}
1461
1462
1463local int inflate_trees_bits(
1464 uIntf *c, /* 19 code lengths */
1465 uIntf *bb, /* bits tree desired/actual depth */
1466 inflate_huft * FAR *tb, /* bits tree result */
1467 z_stream *z /* for zfree function */
1468)
1469{
1470 int r;
1471
1472 r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
1473 if (r == Z_DATA_ERROR)
1474 z->msg = "oversubscribed dynamic bit lengths tree";
1475 else if (r == Z_BUF_ERROR)
1476 {
1477 inflate_trees_free(*tb, z);
1478 z->msg = "incomplete dynamic bit lengths tree";
1479 r = Z_DATA_ERROR;
1480 }
1481 return r;
1482}
1483
1484
1485local int inflate_trees_dynamic(
1486 uInt nl, /* number of literal/length codes */
1487 uInt nd, /* number of distance codes */
1488 uIntf *c, /* that many (total) code lengths */
1489 uIntf *bl, /* literal desired/actual bit depth */
1490 uIntf *bd, /* distance desired/actual bit depth */
1491 inflate_huft * FAR *tl, /* literal/length tree result */
1492 inflate_huft * FAR *td, /* distance tree result */
1493 z_stream *z /* for zfree function */
1494)
1495{
1496 int r;
1497
1498 /* build literal/length tree */
1499 if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
1500 {
1501 if (r == Z_DATA_ERROR)
1502 z->msg = "oversubscribed literal/length tree";
1503 else if (r == Z_BUF_ERROR)
1504 {
1505 inflate_trees_free(*tl, z);
1506 z->msg = "incomplete literal/length tree";
1507 r = Z_DATA_ERROR;
1508 }
1509 return r;
1510 }
1511
1512 /* build distance tree */
1513 if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
1514 {
1515 if (r == Z_DATA_ERROR)
 1516      z->msg = "oversubscribed distance tree";
1517 else if (r == Z_BUF_ERROR) {
1518#ifdef PKZIP_BUG_WORKAROUND
1519 r = Z_OK;
1520 }
1521#else
1522 inflate_trees_free(*td, z);
 1523      z->msg = "incomplete distance tree";
1524 r = Z_DATA_ERROR;
1525 }
1526 inflate_trees_free(*tl, z);
1527 return r;
1528#endif
1529 }
1530
1531 /* done */
1532 return Z_OK;
1533}
1534
1535
1536/* build fixed tables only once--keep them here */
1537local int fixed_lock = 0;
1538local int fixed_built = 0;
1539#define FIXEDH 530 /* number of hufts used by fixed tables */
1540local uInt fixed_left = FIXEDH;
1541local inflate_huft fixed_mem[FIXEDH];
1542local uInt fixed_bl;
1543local uInt fixed_bd;
1544local inflate_huft *fixed_tl;
1545local inflate_huft *fixed_td;
1546
1547
1548local voidpf falloc(
1549 voidpf q, /* opaque pointer (not used) */
1550 uInt n, /* number of items */
1551 uInt s /* size of item */
1552)
1553{
1554 Assert(s == sizeof(inflate_huft) && n <= fixed_left,
1555 "inflate_trees falloc overflow");
1556 if (q) s++; /* to make some compilers happy */
1557 fixed_left -= n;
1558 return (voidpf)(fixed_mem + fixed_left);
1559}
1560
1561
1562local void ffree(
1563 voidpf q,
1564 voidpf p,
1565 uInt n
1566)
1567{
1568 Assert(0, "inflate_trees ffree called!");
1569 if (q) q = p; /* to make some compilers happy */
1570}
1571
1572
1573local int inflate_trees_fixed(
1574 uIntf *bl, /* literal desired/actual bit depth */
1575 uIntf *bd, /* distance desired/actual bit depth */
1576 inflate_huft * FAR *tl, /* literal/length tree result */
1577 inflate_huft * FAR *td /* distance tree result */
1578)
1579{
1580 /* build fixed tables if not built already--lock out other instances */
1581 while (++fixed_lock > 1)
1582 fixed_lock--;
1583 if (!fixed_built)
1584 {
1585 int k; /* temporary variable */
1586 unsigned c[288]; /* length list for huft_build */
1587 z_stream z; /* for falloc function */
1588
1589 /* set up fake z_stream for memory routines */
1590 z.zalloc = falloc;
1591 z.zfree = ffree;
1592 z.opaque = Z_NULL;
1593
1594 /* literal table */
1595 for (k = 0; k < 144; k++)
1596 c[k] = 8;
1597 for (; k < 256; k++)
1598 c[k] = 9;
1599 for (; k < 280; k++)
1600 c[k] = 7;
1601 for (; k < 288; k++)
1602 c[k] = 8;
1603 fixed_bl = 7;
1604 huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
1605
1606 /* distance table */
1607 for (k = 0; k < 30; k++)
1608 c[k] = 5;
1609 fixed_bd = 5;
1610 huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
1611
1612 /* done */
1613 fixed_built = 1;
1614 }
1615 fixed_lock--;
1616 *bl = fixed_bl;
1617 *bd = fixed_bd;
1618 *tl = fixed_tl;
1619 *td = fixed_td;
1620 return Z_OK;
1621}
1622
1623
1624local int inflate_trees_free(
1625 inflate_huft *t, /* table to free */
1626 z_stream *z /* for zfree function */
1627)
1628/* Free the malloc'ed tables built by huft_build(), which makes a linked
1629 list of the tables it made, with the links in a dummy first entry of
1630 each table. */
1631{
1632 register inflate_huft *p, *q;
1633
1634 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1635 p = t;
1636 while (p != Z_NULL)
1637 {
1638 q = (--p)->next;
1639 ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
1640 p = q;
1641 }
1642 return Z_OK;
1643}
1644
1645/*+++++*/
1646/* infcodes.c -- process literals and length/distance pairs
1647 * Copyright (C) 1995 Mark Adler
1648 * For conditions of distribution and use, see copyright notice in zlib.h
1649 */
1650
1651/* simplify the use of the inflate_huft type with some defines */
1652#define base more.Base
1653#define next more.Next
1654#define exop word.what.Exop
1655#define bits word.what.Bits
1656
1657/* inflate codes private state */
1658struct inflate_codes_state {
1659
1660 /* mode */
1661 enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
1662 START, /* x: set up for LEN */
1663 LEN, /* i: get length/literal/eob next */
1664 LENEXT, /* i: getting length extra (have base) */
1665 DIST, /* i: get distance next */
1666 DISTEXT, /* i: getting distance extra */
1667 COPY, /* o: copying bytes in window, waiting for space */
1668 LIT, /* o: got literal, waiting for output space */
1669 WASH, /* o: got eob, possibly still output waiting */
1670 END, /* x: got eob and all data flushed */
1671 BADCODE} /* x: got error */
1672 mode; /* current inflate_codes mode */
1673
1674 /* mode dependent information */
1675 uInt len;
1676 union {
1677 struct {
1678 inflate_huft *tree; /* pointer into tree */
1679 uInt need; /* bits needed */
1680 } code; /* if LEN or DIST, where in tree */
1681 uInt lit; /* if LIT, literal */
1682 struct {
1683 uInt get; /* bits to get for extra */
1684 uInt dist; /* distance back to copy from */
1685 } copy; /* if EXT or COPY, where and how much */
1686 } sub; /* submode */
1687
1688 /* mode independent information */
1689 Byte lbits; /* ltree bits decoded per branch */
 1690 Byte dbits; /* dtree bits decoded per branch */
1691 inflate_huft *ltree; /* literal/length/eob tree */
1692 inflate_huft *dtree; /* distance tree */
1693
1694};
1695
1696
1697local inflate_codes_statef *inflate_codes_new(
1698 uInt bl,
1699 uInt bd,
1700 inflate_huft *tl,
1701 inflate_huft *td,
1702 z_stream *z
1703)
1704{
1705 inflate_codes_statef *c;
1706
1707 if ((c = (inflate_codes_statef *)
1708 ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
1709 {
1710 c->mode = START;
1711 c->lbits = (Byte)bl;
1712 c->dbits = (Byte)bd;
1713 c->ltree = tl;
1714 c->dtree = td;
1715 Tracev((stderr, "inflate: codes new\n"));
1716 }
1717 return c;
1718}
1719
1720
1721local int inflate_codes(
1722 inflate_blocks_statef *s,
1723 z_stream *z,
1724 int r
1725)
1726{
1727 uInt j; /* temporary storage */
1728 inflate_huft *t; /* temporary pointer */
1729 uInt e; /* extra bits or operation */
1730 uLong b; /* bit buffer */
1731 uInt k; /* bits in bit buffer */
1732 Bytef *p; /* input data pointer */
1733 uInt n; /* bytes available there */
1734 Bytef *q; /* output window write pointer */
1735 uInt m; /* bytes to end of window or read pointer */
1736 Bytef *f; /* pointer to copy strings from */
1737 inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
1738
1739 /* copy input/output information to locals (UPDATE macro restores) */
1740 LOAD
1741
1742 /* process input and output based on current state */
1743 while (1) switch (c->mode)
1744 { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
1745 case START: /* x: set up for LEN */
1746#ifndef SLOW
1747 if (m >= 258 && n >= 10)
1748 {
1749 UPDATE
1750 r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
1751 LOAD
1752 if (r != Z_OK)
1753 {
1754 c->mode = r == Z_STREAM_END ? WASH : BADCODE;
1755 break;
1756 }
1757 }
1758#endif /* !SLOW */
1759 c->sub.code.need = c->lbits;
1760 c->sub.code.tree = c->ltree;
1761 c->mode = LEN;
1762 case LEN: /* i: get length/literal/eob next */
1763 j = c->sub.code.need;
1764 NEEDBITS(j)
1765 t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
1766 DUMPBITS(t->bits)
1767 e = (uInt)(t->exop);
1768 if (e == 0) /* literal */
1769 {
1770 c->sub.lit = t->base;
1771 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
1772 "inflate: literal '%c'\n" :
1773 "inflate: literal 0x%02x\n", t->base));
1774 c->mode = LIT;
1775 break;
1776 }
1777 if (e & 16) /* length */
1778 {
1779 c->sub.copy.get = e & 15;
1780 c->len = t->base;
1781 c->mode = LENEXT;
1782 break;
1783 }
1784 if ((e & 64) == 0) /* next table */
1785 {
1786 c->sub.code.need = e;
1787 c->sub.code.tree = t->next;
1788 break;
1789 }
1790 if (e & 32) /* end of block */
1791 {
1792 Tracevv((stderr, "inflate: end of block\n"));
1793 c->mode = WASH;
1794 break;
1795 }
1796 c->mode = BADCODE; /* invalid code */
1797 z->msg = "invalid literal/length code";
1798 r = Z_DATA_ERROR;
1799 LEAVE
1800 case LENEXT: /* i: getting length extra (have base) */
1801 j = c->sub.copy.get;
1802 NEEDBITS(j)
1803 c->len += (uInt)b & inflate_mask[j];
1804 DUMPBITS(j)
1805 c->sub.code.need = c->dbits;
1806 c->sub.code.tree = c->dtree;
1807 Tracevv((stderr, "inflate: length %u\n", c->len));
1808 c->mode = DIST;
1809 case DIST: /* i: get distance next */
1810 j = c->sub.code.need;
1811 NEEDBITS(j)
1812 t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
1813 DUMPBITS(t->bits)
1814 e = (uInt)(t->exop);
1815 if (e & 16) /* distance */
1816 {
1817 c->sub.copy.get = e & 15;
1818 c->sub.copy.dist = t->base;
1819 c->mode = DISTEXT;
1820 break;
1821 }
1822 if ((e & 64) == 0) /* next table */
1823 {
1824 c->sub.code.need = e;
1825 c->sub.code.tree = t->next;
1826 break;
1827 }
1828 c->mode = BADCODE; /* invalid code */
1829 z->msg = "invalid distance code";
1830 r = Z_DATA_ERROR;
1831 LEAVE
1832 case DISTEXT: /* i: getting distance extra */
1833 j = c->sub.copy.get;
1834 NEEDBITS(j)
1835 c->sub.copy.dist += (uInt)b & inflate_mask[j];
1836 DUMPBITS(j)
1837 Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
1838 c->mode = COPY;
1839 case COPY: /* o: copying bytes in window, waiting for space */
1840#ifndef __TURBOC__ /* Turbo C bug for following expression */
1841 f = (uInt)(q - s->window) < c->sub.copy.dist ?
1842 s->end - (c->sub.copy.dist - (q - s->window)) :
1843 q - c->sub.copy.dist;
1844#else
1845 f = q - c->sub.copy.dist;
1846 if ((uInt)(q - s->window) < c->sub.copy.dist)
1847 f = s->end - (c->sub.copy.dist - (q - s->window));
1848#endif
1849 while (c->len)
1850 {
1851 NEEDOUT
1852 OUTBYTE(*f++)
1853 if (f == s->end)
1854 f = s->window;
1855 c->len--;
1856 }
1857 c->mode = START;
1858 break;
1859 case LIT: /* o: got literal, waiting for output space */
1860 NEEDOUT
1861 OUTBYTE(c->sub.lit)
1862 c->mode = START;
1863 break;
1864 case WASH: /* o: got eob, possibly more output */
1865 FLUSH
1866 if (s->read != s->write)
1867 LEAVE
1868 c->mode = END;
1869 case END:
1870 r = Z_STREAM_END;
1871 LEAVE
1872 case BADCODE: /* x: got error */
1873 r = Z_DATA_ERROR;
1874 LEAVE
1875 default:
1876 r = Z_STREAM_ERROR;
1877 LEAVE
1878 }
1879}
1880
1881
1882local void inflate_codes_free(
1883 inflate_codes_statef *c,
1884 z_stream *z
1885)
1886{
1887 ZFREE(z, c, sizeof(struct inflate_codes_state));
1888 Tracev((stderr, "inflate: codes free\n"));
1889}
1890
1891/*+++++*/
1892/* inflate_util.c -- data and routines common to blocks and codes
1893 * Copyright (C) 1995 Mark Adler
1894 * For conditions of distribution and use, see copyright notice in zlib.h
1895 */
1896
1897/* copy as much as possible from the sliding window to the output area */
1898local int inflate_flush(
1899 inflate_blocks_statef *s,
1900 z_stream *z,
1901 int r
1902)
1903{
1904 uInt n;
1905 Bytef *p, *q;
1906
1907 /* local copies of source and destination pointers */
1908 p = z->next_out;
1909 q = s->read;
1910
1911 /* compute number of bytes to copy as far as end of window */
1912 n = (uInt)((q <= s->write ? s->write : s->end) - q);
1913 if (n > z->avail_out) n = z->avail_out;
1914 if (n && r == Z_BUF_ERROR) r = Z_OK;
1915
1916 /* update counters */
1917 z->avail_out -= n;
1918 z->total_out += n;
1919
1920 /* update check information */
1921 if (s->checkfn != Z_NULL)
1922 s->check = (*s->checkfn)(s->check, q, n);
1923
1924 /* copy as far as end of window */
1925 zmemcpy(p, q, n);
1926 p += n;
1927 q += n;
1928
1929 /* see if more to copy at beginning of window */
1930 if (q == s->end)
1931 {
1932 /* wrap pointers */
1933 q = s->window;
1934 if (s->write == s->end)
1935 s->write = s->window;
1936
1937 /* compute bytes to copy */
1938 n = (uInt)(s->write - q);
1939 if (n > z->avail_out) n = z->avail_out;
1940 if (n && r == Z_BUF_ERROR) r = Z_OK;
1941
1942 /* update counters */
1943 z->avail_out -= n;
1944 z->total_out += n;
1945
1946 /* update check information */
1947 if (s->checkfn != Z_NULL)
1948 s->check = (*s->checkfn)(s->check, q, n);
1949
1950 /* copy */
1951 zmemcpy(p, q, n);
1952 p += n;
1953 q += n;
1954 }
1955
1956 /* update pointers */
1957 z->next_out = p;
1958 s->read = q;
1959
1960 /* done */
1961 return r;
1962}
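
/*
 * Editor's sketch: inflate_flush() above is a circular-buffer drain; at
 * most two copies are needed, read..write (or read..end and then, after
 * the pointers wrap, window..write). A minimal standalone version of the
 * same wrap logic, with hypothetical parameter names:
 */
#if 0 /* illustrative sketch only */
static uInt ring_drain(Bytef *out, uInt avail, Bytef *win, Bytef *end,
                       Bytef **read, Bytef *write)
{
    uInt total = 0, n;

    /* first segment: up to the write pointer or the end of the window */
    n = (uInt)((*read <= write ? write : end) - *read);
    if (n > avail) n = avail;
    zmemcpy(out, *read, n);
    out += n; *read += n; avail -= n; total += n;

    if (*read == end) {         /* wrap to the start of the window */
        *read = win;
        if (write == end)       /* the writer wrapped as well */
            write = win;
        n = (uInt)(write - *read);
        if (n > avail) n = avail;
        zmemcpy(out, *read, n);
        *read += n; total += n;
    }
    return total;
}
#endif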
1963
1964
1965/*+++++*/
1966/* inffast.c -- process literals and length/distance pairs fast
1967 * Copyright (C) 1995 Mark Adler
1968 * For conditions of distribution and use, see copyright notice in zlib.h
1969 */
1970
1971/* simplify the use of the inflate_huft type with some defines */
1972#define base more.Base
1973#define next more.Next
1974#define exop word.what.Exop
1975#define bits word.what.Bits
1976
1977/* macros for bit input with no checking and for returning unused bytes */
1978#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
1979#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
1980
1981/* Called with number of bytes left to write in window at least 258
1982 (the maximum string length) and number of input bytes available
1983 at least ten. The ten bytes are six bytes for the longest length/
1984 distance pair plus four bytes for overloading the bit buffer. */
1985
1986local int inflate_fast(
1987 uInt bl,
1988 uInt bd,
1989 inflate_huft *tl,
1990 inflate_huft *td,
1991 inflate_blocks_statef *s,
1992 z_stream *z
1993)
1994{
1995 inflate_huft *t; /* temporary pointer */
1996 uInt e; /* extra bits or operation */
1997 uLong b; /* bit buffer */
1998 uInt k; /* bits in bit buffer */
1999 Bytef *p; /* input data pointer */
2000 uInt n; /* bytes available there */
2001 Bytef *q; /* output window write pointer */
2002 uInt m; /* bytes to end of window or read pointer */
2003 uInt ml; /* mask for literal/length tree */
2004 uInt md; /* mask for distance tree */
2005 uInt c; /* bytes to copy */
2006 uInt d; /* distance back to copy from */
2007 Bytef *r; /* copy source pointer */
2008
2009 /* load input, output, bit values */
2010 LOAD
2011
2012 /* initialize masks */
2013 ml = inflate_mask[bl];
2014 md = inflate_mask[bd];
2015
2016 /* do until not enough input or output space for fast loop */
2017 do { /* assume called with m >= 258 && n >= 10 */
2018 /* get literal/length code */
2019 GRABBITS(20) /* max bits for literal/length code */
2020 if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
2021 {
2022 DUMPBITS(t->bits)
2023 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
2024 "inflate: * literal '%c'\n" :
2025 "inflate: * literal 0x%02x\n", t->base));
2026 *q++ = (Byte)t->base;
2027 m--;
2028 continue;
2029 }
2030 do {
2031 DUMPBITS(t->bits)
2032 if (e & 16)
2033 {
2034 /* get extra bits for length */
2035 e &= 15;
2036 c = t->base + ((uInt)b & inflate_mask[e]);
2037 DUMPBITS(e)
2038 Tracevv((stderr, "inflate: * length %u\n", c));
2039
2040 /* decode distance base of block to copy */
2041 GRABBITS(15); /* max bits for distance code */
2042 e = (t = td + ((uInt)b & md))->exop;
2043 do {
2044 DUMPBITS(t->bits)
2045 if (e & 16)
2046 {
2047 /* get extra bits to add to distance base */
2048 e &= 15;
2049 GRABBITS(e) /* get extra bits (up to 13) */
2050 d = t->base + ((uInt)b & inflate_mask[e]);
2051 DUMPBITS(e)
2052 Tracevv((stderr, "inflate: * distance %u\n", d));
2053
2054 /* do the copy */
2055 m -= c;
2056 if ((uInt)(q - s->window) >= d) /* offset before dest */
2057 { /* just copy */
2058 r = q - d;
2059 *q++ = *r++; c--; /* minimum count is three, */
2060 *q++ = *r++; c--; /* so unroll loop a little */
2061 }
2062 else /* else offset after destination */
2063 {
2064 e = d - (q - s->window); /* bytes from offset to end */
2065 r = s->end - e; /* pointer to offset */
2066 if (c > e) /* if source crosses, */
2067 {
2068 c -= e; /* copy to end of window */
2069 do {
2070 *q++ = *r++;
2071 } while (--e);
2072 r = s->window; /* copy rest from start of window */
2073 }
2074 }
2075 do { /* copy all or what's left */
2076 *q++ = *r++;
2077 } while (--c);
2078 break;
2079 }
2080 else if ((e & 64) == 0)
2081 e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
2082 else
2083 {
2084 z->msg = "invalid distance code";
2085 UNGRAB
2086 UPDATE
2087 return Z_DATA_ERROR;
2088 }
2089 } while (1);
2090 break;
2091 }
2092 if ((e & 64) == 0)
2093 {
2094 if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
2095 {
2096 DUMPBITS(t->bits)
2097 Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
2098 "inflate: * literal '%c'\n" :
2099 "inflate: * literal 0x%02x\n", t->base));
2100 *q++ = (Byte)t->base;
2101 m--;
2102 break;
2103 }
2104 }
2105 else if (e & 32)
2106 {
2107 Tracevv((stderr, "inflate: * end of block\n"));
2108 UNGRAB
2109 UPDATE
2110 return Z_STREAM_END;
2111 }
2112 else
2113 {
2114 z->msg = "invalid literal/length code";
2115 UNGRAB
2116 UPDATE
2117 return Z_DATA_ERROR;
2118 }
2119 } while (1);
2120 } while (m >= 258 && n >= 10);
2121
2122 /* not enough input or output--restore pointers and return */
2123 UNGRAB
2124 UPDATE
2125 return Z_OK;
2126}
2127
2128
2129/*+++++*/
2130/* zutil.c -- target dependent utility functions for the compression library
2131 * Copyright (C) 1995 Jean-loup Gailly.
2132 * For conditions of distribution and use, see copyright notice in zlib.h
2133 */
2134
2135/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
2136
2137char *zlib_version = ZLIB_VERSION;
2138
2139char *z_errmsg[] = {
2140"stream end", /* Z_STREAM_END 1 */
2141"", /* Z_OK 0 */
2142"file error", /* Z_ERRNO (-1) */
2143"stream error", /* Z_STREAM_ERROR (-2) */
2144"data error", /* Z_DATA_ERROR (-3) */
2145"insufficient memory", /* Z_MEM_ERROR (-4) */
2146"buffer error", /* Z_BUF_ERROR (-5) */
2147""};
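
/*
 * Editor's note: the table above is laid out so that index (1 - code)
 * yields the message for each return code, e.g. z_errmsg[1 - Z_DATA_ERROR]
 * is "data error". A hedged helper (the name is illustrative, not zlib's):
 */
#if 0 /* illustrative sketch only */
static char *zerr_msg(int code)     /* Z_BUF_ERROR <= code <= Z_STREAM_END */
{
    return z_errmsg[1 - code];
}
#endif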
2148
2149
2150/*+++++*/
2151/* adler32.c -- compute the Adler-32 checksum of a data stream
2152 * Copyright (C) 1995 Mark Adler
2153 * For conditions of distribution and use, see copyright notice in zlib.h
2154 */
2155
2156/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
2157
2158#define BASE 65521L /* largest prime smaller than 65536 */
2159#define NMAX 5552
2160/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
2161
2162#define DO1(buf) {s1 += *buf++; s2 += s1;}
2163#define DO2(buf) DO1(buf); DO1(buf);
2164#define DO4(buf) DO2(buf); DO2(buf);
2165#define DO8(buf) DO4(buf); DO4(buf);
2166#define DO16(buf) DO8(buf); DO8(buf);
2167
2168/* ========================================================================= */
2169uLong adler32(
2170 uLong adler,
2171 Bytef *buf,
2172 uInt len
2173)
2174{
2175 unsigned long s1 = adler & 0xffff;
2176 unsigned long s2 = (adler >> 16) & 0xffff;
2177 int k;
2178
2179 if (buf == Z_NULL) return 1L;
2180
2181 while (len > 0) {
2182 k = len < NMAX ? len : NMAX;
2183 len -= k;
2184 while (k >= 16) {
2185 DO16(buf);
2186 k -= 16;
2187 }
2188 if (k != 0) do {
2189 DO1(buf);
2190 } while (--k);
2191 s1 %= BASE;
2192 s2 %= BASE;
2193 }
2194 return (s2 << 16) | s1;
2195}
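
/*
 * Editor's worked example: adler32() keeps two rolling sums modulo 65521.
 * Starting from adler32(0L, Z_NULL, 0) == 1, feeding "abc" gives
 * s1 = 1 + 97 + 98 + 99 = 295 and s2 = 98 + 196 + 295 = 589, so the
 * checksum is (589 << 16) | 295 == 0x024d0127.
 */
#if 0 /* illustrative sketch only */
static uLong adler32_of_abc(void)
{
    uLong a = adler32(0L, Z_NULL, 0);       /* initial value, returns 1 */

    return adler32(a, (Bytef *)"abc", 3);   /* == 0x024d0127 */
}
#endif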
diff --git a/arch/ppc64/boot/zlib.h b/arch/ppc64/boot/zlib.h
deleted file mode 100644
index f0b996c6864f..000000000000
--- a/arch/ppc64/boot/zlib.h
+++ /dev/null
@@ -1,432 +0,0 @@
1/* */
2
3/*
4 * This file is derived from zlib.h and zconf.h from the zlib-0.95
5 * distribution by Jean-loup Gailly and Mark Adler, with some additions
6 * by Paul Mackerras to aid in implementing Deflate compression and
7 * decompression for PPP packets.
8 */
9
10/*
11 * ==FILEVERSION 960122==
12 *
13 * This marker is used by the Linux installation script to determine
14 * whether an up-to-date version of this file is already installed.
15 */
16
17/* zlib.h -- interface of the 'zlib' general purpose compression library
18 version 0.95, Aug 16th, 1995.
19
20 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
21
22 This software is provided 'as-is', without any express or implied
23 warranty. In no event will the authors be held liable for any damages
24 arising from the use of this software.
25
26 Permission is granted to anyone to use this software for any purpose,
27 including commercial applications, and to alter it and redistribute it
28 freely, subject to the following restrictions:
29
30 1. The origin of this software must not be misrepresented; you must not
31 claim that you wrote the original software. If you use this software
32 in a product, an acknowledgment in the product documentation would be
33 appreciated but is not required.
34 2. Altered source versions must be plainly marked as such, and must not be
35 misrepresented as being the original software.
36 3. This notice may not be removed or altered from any source distribution.
37
38 Jean-loup Gailly Mark Adler
39 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
40 */
41
42#ifndef _ZLIB_H
43#define _ZLIB_H
44
45/* #include "zconf.h" */ /* included directly here */
46
47/* zconf.h -- configuration of the zlib compression library
48 * Copyright (C) 1995 Jean-loup Gailly.
49 * For conditions of distribution and use, see copyright notice in zlib.h
50 */
51
52/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
53
54/*
55 The library does not install any signal handler. It is recommended to
56 add at least a handler for SIGSEGV when decompressing; the library checks
57 the consistency of the input data whenever possible but may go nuts
58 for some forms of corrupted input.
59 */
60
61/*
62 * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
63 * than 64k bytes at a time (needed on systems with 16-bit int).
64 * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
65 * at addresses which are not a multiple of their size.
66 * Under DOS, -DFAR=far or -DFAR=__far may be needed.
67 */
68
69#ifndef STDC
70# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
71# define STDC
72# endif
73#endif
74
75#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
76# include <unix.h>
77#endif
78
79/* Maximum value for memLevel in deflateInit2 */
80#ifndef MAX_MEM_LEVEL
81# ifdef MAXSEG_64K
82# define MAX_MEM_LEVEL 8
83# else
84# define MAX_MEM_LEVEL 9
85# endif
86#endif
87
88#ifndef FAR
89# define FAR
90#endif
91
92/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
93#ifndef MAX_WBITS
94# define MAX_WBITS 15 /* 32K LZ77 window */
95#endif
96
97/* The memory requirements for deflate are (in bytes):
98 1 << (windowBits+2) + 1 << (memLevel+9)
99 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
100 plus a few kilobytes for small objects. For example, if you want to reduce
101 the default memory requirements from 256K to 128K, compile with
102 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
103 Of course this will generally degrade compression (there's no free lunch).
104
105 The memory requirements for inflate are (in bytes) 1 << windowBits
106 that is, 32K for windowBits=15 (default value) plus a few kilobytes
107 for small objects.
108*/
109
110 /* Type declarations */
111
112#ifndef OF /* function prototypes */
113# ifdef STDC
114# define OF(args) args
115# else
116# define OF(args) ()
117# endif
118#endif
119
120typedef unsigned char Byte; /* 8 bits */
121typedef unsigned int uInt; /* 16 bits or more */
122typedef unsigned long uLong; /* 32 bits or more */
123
124typedef Byte FAR Bytef;
125typedef char FAR charf;
126typedef int FAR intf;
127typedef uInt FAR uIntf;
128typedef uLong FAR uLongf;
129
130#ifdef STDC
131 typedef void FAR *voidpf;
132 typedef void *voidp;
133#else
134 typedef Byte FAR *voidpf;
135 typedef Byte *voidp;
136#endif
137
138/* end of original zconf.h */
139
140#define ZLIB_VERSION "0.95P"
141
142/*
143 The 'zlib' compression library provides in-memory compression and
144 decompression functions, including integrity checks of the uncompressed
145 data. This version of the library supports only one compression method
146 (deflation) but other algorithms may be added later and will have the same
147 stream interface.
148
149 For compression the application must provide the output buffer and
150 may optionally provide the input buffer for optimization. For decompression,
151 the application must provide the input buffer and may optionally provide
152 the output buffer for optimization.
153
154 Compression can be done in a single step if the buffers are large
155 enough (for example if an input file is mmap'ed), or can be done by
156 repeated calls of the compression function. In the latter case, the
157 application must provide more input and/or consume the output
158 (providing more output space) before each call.
159*/
160
161typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
162typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
163
164struct internal_state;
165
166typedef struct z_stream_s {
167 Bytef *next_in; /* next input byte */
168 uInt avail_in; /* number of bytes available at next_in */
169 uLong total_in; /* total nb of input bytes read so far */
170
171 Bytef *next_out; /* next output byte should be put there */
172 uInt avail_out; /* remaining free space at next_out */
173 uLong total_out; /* total nb of bytes output so far */
174
175 char *msg; /* last error message, NULL if no error */
176 struct internal_state FAR *state; /* not visible by applications */
177
178 alloc_func zalloc; /* used to allocate the internal state */
179 free_func zfree; /* used to free the internal state */
180 voidp opaque; /* private data object passed to zalloc and zfree */
181
182 Byte data_type; /* best guess about the data type: ascii or binary */
183
184} z_stream;
185
186/*
187 The application must update next_in and avail_in when avail_in has
188 dropped to zero. It must update next_out and avail_out when avail_out
189 has dropped to zero. The application must initialize zalloc, zfree and
190 opaque before calling the init function. All other fields are set by the
191 compression library and must not be updated by the application.
192
193 The opaque value provided by the application will be passed as the first
194 parameter for calls of zalloc and zfree. This can be useful for custom
195 memory management. The compression library attaches no meaning to the
196 opaque value.
197
198 zalloc must return Z_NULL if there is not enough memory for the object.
199 On 16-bit systems, the functions zalloc and zfree must be able to allocate
200 exactly 65536 bytes, but will not be required to allocate more than this
201 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
202 pointers returned by zalloc for objects of exactly 65536 bytes *must*
203 have their offset normalized to zero. The default allocation function
204 provided by this library ensures this (see zutil.c). To reduce memory
205 requirements and avoid any allocation of 64K objects, at the expense of
206 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
207
208 The fields total_in and total_out can be used for statistics or
209 progress reports. After compression, total_in holds the total size of
210 the uncompressed data and may be saved for use in the decompressor
211 (particularly if the decompressor wants to decompress everything in
212 a single step).
213*/
214
215 /* constants */
216
217#define Z_NO_FLUSH 0
218#define Z_PARTIAL_FLUSH 1
219#define Z_FULL_FLUSH 2
220#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
221#define Z_FINISH 4
222#define Z_PACKET_FLUSH 5
223/* See deflate() below for the usage of these constants */
224
225#define Z_OK 0
226#define Z_STREAM_END 1
227#define Z_ERRNO (-1)
228#define Z_STREAM_ERROR (-2)
229#define Z_DATA_ERROR (-3)
230#define Z_MEM_ERROR (-4)
231#define Z_BUF_ERROR (-5)
232/* error codes for the compression/decompression functions */
233
234#define Z_BEST_SPEED 1
235#define Z_BEST_COMPRESSION 9
236#define Z_DEFAULT_COMPRESSION (-1)
237/* compression levels */
238
239#define Z_FILTERED 1
240#define Z_HUFFMAN_ONLY 2
241#define Z_DEFAULT_STRATEGY 0
242
243#define Z_BINARY 0
244#define Z_ASCII 1
245#define Z_UNKNOWN 2
246/* Used to set the data_type field */
247
248#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
249
250extern char *zlib_version;
251/* The application can compare zlib_version and ZLIB_VERSION for consistency.
252 If the first character differs, the library code actually used is
253 not compatible with the zlib.h header file used by the application.
254 */
255
256 /* basic functions */
257
258extern int inflateInit OF((z_stream *strm));
259/*
260 Initializes the internal stream state for decompression. The fields
261 zalloc and zfree must be initialized before by the caller. If zalloc and
262 zfree are set to Z_NULL, inflateInit updates them to use default allocation
263 functions.
264
265 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
266 enough memory. msg is set to null if there is no error message.
267 inflateInit does not perform any decompression: this will be done by
268 inflate().
269*/
270
271
272extern int inflate OF((z_stream *strm, int flush));
273/*
274 Performs one or both of the following actions:
275
276 - Decompress more input starting at next_in and update next_in and avail_in
277 accordingly. If not all input can be processed (because there is not
278 enough room in the output buffer), next_in is updated and processing
279 will resume at this point for the next call of inflate().
280
281 - Provide more output starting at next_out and update next_out and avail_out
282 accordingly. inflate() always provides as much output as possible
283 (until there is no more input data or no more space in the output buffer).
284
285 Before the call of inflate(), the application should ensure that at least
286 one of the actions is possible, by providing more input and/or consuming
287 more output, and updating the next_* and avail_* values accordingly.
288 The application can consume the uncompressed output when it wants, for
289 example when the output buffer is full (avail_out == 0), or after each
290 call of inflate().
291
292 If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
293 inflate flushes as much output as possible to the output buffer. The
294 flushing behavior of inflate is not specified for values of the flush
295 parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
296 current implementation actually flushes as much output as possible
297 anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
298 has been consumed, it is expecting to see the length field of a stored
299 block; if not, it returns Z_DATA_ERROR.
300
301 inflate() should normally be called until it returns Z_STREAM_END or an
302 error. However if all decompression is to be performed in a single step
303 (a single call of inflate), the parameter flush should be set to
304 Z_FINISH. In this case all pending input is processed and all pending
305 output is flushed; avail_out must be large enough to hold all the
306 uncompressed data. (The size of the uncompressed data may have been saved
307 by the compressor for this purpose.) The next operation on this stream must
308 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
309 is never required, but can be used to inform inflate that a faster routine
310 may be used for the single inflate() call.
311
312 inflate() returns Z_OK if some progress has been made (more input
313 processed or more output produced), Z_STREAM_END if the end of the
314 compressed data has been reached and all uncompressed output has been
315 produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
316 the stream structure was inconsistent (for example if next_in or next_out
317 was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
318 progress is possible or if there was not enough room in the output buffer
319 when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
320 call inflateSync to look for a good compression block. */
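   Putting the above together, a typical streaming loop might look like the
   sketch below; read_compressed() and consume_output() stand in for the
   caller's own I/O and are not part of this API:

	unsigned char in_buf[4096], out_buf[16384];
	z_stream strm;		/* already set up as shown for inflateInit */
	int ret;

	strm.next_in  = in_buf;
	strm.avail_in = read_compressed(in_buf, sizeof(in_buf));
	for (;;) {
		strm.next_out  = out_buf;	/* offer fresh output space */
		strm.avail_out = sizeof(out_buf);
		ret = inflate(&strm, Z_NO_FLUSH);
		consume_output(out_buf, sizeof(out_buf) - strm.avail_out);
		if (ret == Z_STREAM_END)
			break;			/* all output produced */
		if (ret != Z_OK)
			fail(strm.msg);		/* Z_DATA_ERROR and friends */
		if (strm.avail_in == 0) {	/* input exhausted: refill */
			strm.next_in  = in_buf;
			strm.avail_in = read_compressed(in_buf, sizeof(in_buf));
		}
	}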
321
322
323extern int inflateEnd OF((z_stream *strm));
324/*
325 All dynamically allocated data structures for this stream are freed.
326 This function discards any unprocessed input and does not flush any
327 pending output.
328
329 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
330 was inconsistent. In the error case, msg may be set but then points to a
331 static string (which must not be deallocated).
332*/
333
334 /* advanced functions */
335
336extern int inflateInit2 OF((z_stream *strm,
337 int windowBits));
338/*
 339   This is another version of inflateInit with more decompression options. The
 340   fields next_out, zalloc and zfree must be initialized by the caller beforehand.
341
342 The windowBits parameter is the base two logarithm of the maximum window
343 size (the size of the history buffer). It should be in the range 8..15 for
344 this version of the library (the value 16 will be allowed soon). The
345 default value is 15 if inflateInit is used instead. If a compressed stream
346 with a larger window size is given as input, inflate() will return with
347 the error code Z_DATA_ERROR instead of trying to allocate a larger window.
348
349 If next_out is not null, the library will use this buffer for the history
350 buffer; the buffer must either be large enough to hold the entire output
351 data, or have at least 1<<windowBits bytes. If next_out is null, the
352 library will allocate its own buffer (and leave next_out null). next_in
353 need not be provided here but must be provided by the application for the
354 next call of inflate().
355
356 If the history buffer is provided by the application, next_out must
357 never be changed by the application since the decompressor maintains
358 history information inside this buffer from call to call; the application
359 can only reset next_out to the beginning of the history buffer when
360 avail_out is zero and all output has been consumed.
361
362 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
363 not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
364 windowBits < 8). msg is set to null if there is no error message.
365 inflateInit2 does not perform any decompression: this will be done by
366 inflate().
367*/
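   For example, a loader that already knows the decompressed size can pass
   the whole destination as the history buffer; dest and dest_size are
   illustrative names:

	z_stream strm;

	strm.zalloc    = Z_NULL;
	strm.zfree     = Z_NULL;
	strm.opaque    = Z_NULL;
	strm.next_out  = dest;		/* doubles as the history buffer */
	strm.avail_out = dest_size;	/* large enough for all the output */
	if (inflateInit2(&strm, 15) != Z_OK)	/* 15 -> 32K window */
		fail(strm.msg);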
368
369extern int inflateSync OF((z_stream *strm));
370/*
371 Skips invalid compressed data until the special marker (see deflate()
372 above) can be found, or until all available input is skipped. No output
373 is provided.
374
375 inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
376 if no more input was provided, Z_DATA_ERROR if no marker has been found,
377 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
 378   case, the application may save the current value of total_in, which
379 indicates where valid compressed data was found. In the error case, the
380 application may repeatedly call inflateSync, providing more input each time,
381 until success or end of the input data.
382*/
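   A recovery path after Z_DATA_ERROR could drive the resynchronization as
   sketched below; refill() stands in for the caller's input routine:

	int ret;

	for (;;) {
		ret = inflateSync(&strm);
		if (ret == Z_OK)	/* total_in marks the good data */
			break;
		if (ret == Z_STREAM_ERROR)
			break;		/* stream state is unusable */
		/* Z_BUF_ERROR or Z_DATA_ERROR: supply more input, retry */
		strm.next_in  = in_buf;
		strm.avail_in = refill(in_buf, sizeof(in_buf));
		if (strm.avail_in == 0)
			break;		/* input ended, no marker found */
	}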
383
384extern int inflateReset OF((z_stream *strm));
385/*
386 This function is equivalent to inflateEnd followed by inflateInit,
387 but does not free and reallocate all the internal decompression state.
388 The stream will keep attributes that may have been set by inflateInit2.
389
390 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
391 stream state was inconsistent (such as zalloc or state being NULL).
392*/
393
394extern int inflateIncomp OF((z_stream *strm));
395/*
396 This function adds the data at next_in (avail_in bytes) to the output
397 history without performing any output. There must be no pending output,
398 and the decompressor must be expecting to see the start of a block.
399 Calling this function is equivalent to decompressing a stored block
400 containing the data at next_in (except that the data is not output).
401*/
402
403 /* checksum functions */
404
405/*
406 This function is not related to compression but is exported
407 anyway because it might be useful in applications using the
408 compression library.
409*/
410
411extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
412
413/*
414 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
415 return the updated checksum. If buf is NULL, this function returns
416 the required initial value for the checksum.
417 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
418 much faster. Usage example:
419
420 uLong adler = adler32(0L, Z_NULL, 0);
421
422 while (read_buffer(buffer, length) != EOF) {
423 adler = adler32(adler, buffer, length);
424 }
425 if (adler != original_adler) error();
426*/
427
428#ifndef _Z_UTIL_H
429 struct internal_state {int dummy;}; /* hack for buggy compilers */
430#endif
431
432#endif /* _ZLIB_H */
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 37c157c93cef..e79fd60bc122 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -1318,7 +1318,7 @@ CONFIG_MSDOS_PARTITION=y
1318# 1318#
1319CONFIG_NLS=y 1319CONFIG_NLS=y
1320CONFIG_NLS_DEFAULT="iso8859-1" 1320CONFIG_NLS_DEFAULT="iso8859-1"
1321CONFIG_NLS_CODEPAGE_437=m 1321CONFIG_NLS_CODEPAGE_437=y
1322CONFIG_NLS_CODEPAGE_737=m 1322CONFIG_NLS_CODEPAGE_737=m
1323CONFIG_NLS_CODEPAGE_775=m 1323CONFIG_NLS_CODEPAGE_775=m
1324CONFIG_NLS_CODEPAGE_850=m 1324CONFIG_NLS_CODEPAGE_850=m
@@ -1342,7 +1342,7 @@ CONFIG_NLS_ISO8859_8=m
1342CONFIG_NLS_CODEPAGE_1250=m 1342CONFIG_NLS_CODEPAGE_1250=m
1343CONFIG_NLS_CODEPAGE_1251=m 1343CONFIG_NLS_CODEPAGE_1251=m
1344CONFIG_NLS_ASCII=m 1344CONFIG_NLS_ASCII=m
1345CONFIG_NLS_ISO8859_1=m 1345CONFIG_NLS_ISO8859_1=y
1346CONFIG_NLS_ISO8859_2=m 1346CONFIG_NLS_ISO8859_2=m
1347CONFIG_NLS_ISO8859_3=m 1347CONFIG_NLS_ISO8859_3=m
1348CONFIG_NLS_ISO8859_4=m 1348CONFIG_NLS_ISO8859_4=m
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/ppc64/kernel/HvCall.c
deleted file mode 100644
index b772e65b57a2..000000000000
--- a/arch/ppc64/kernel/HvCall.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * HvCall.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <asm/page.h>
12#include <asm/abs_addr.h>
13#include <asm/iSeries/HvCall.h>
14#include <asm/iSeries/HvCallSc.h>
15#include <asm/iSeries/HvTypes.h>
16
17
18void HvCall_writeLogBuffer(const void *buffer, u64 len)
19{
20 struct HvLpBufferList hv_buf;
21 u64 left_this_page;
22 u64 cur = virt_to_abs(buffer);
23
24 while (len) {
25 hv_buf.addr = cur;
26 left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur;
27 if (left_this_page > len)
28 left_this_page = len;
29 hv_buf.len = left_this_page;
30 len -= left_this_page;
31 HvCall2(HvCallBaseWriteLogBuffer,
32 virt_to_abs(&hv_buf),
33 left_this_page);
34 cur = (cur & PAGE_MASK) + PAGE_SIZE;
35 }
36}
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/ppc64/kernel/HvLpConfig.c
deleted file mode 100644
index cb1d6473203c..000000000000
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * HvLpConfig.c
3 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <asm/iSeries/HvLpConfig.h>
22
23HvLpIndex HvLpConfig_getLpIndex_outline(void)
24{
25 return HvLpConfig_getLpIndex();
26}
27EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b138902..000000000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/system.h>
13#include <asm/iSeries/HvLpEvent.h>
14#include <asm/iSeries/HvCallEvent.h>
15#include <asm/iSeries/ItLpNaca.h>
16
17/* Array of LpEvent handler functions */
18LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
19unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
20
21/* Register a handler for an LpEvent type */
22
23int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
24{
25 int rc = 1;
26 if ( eventType < HvLpEvent_Type_NumTypes ) {
27 lpEventHandler[eventType] = handler;
28 rc = 0;
29 }
30 return rc;
31
32}
33
34int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
35{
36 int rc = 1;
37
38 might_sleep();
39
40 if ( eventType < HvLpEvent_Type_NumTypes ) {
41 if ( !lpEventHandlerPaths[eventType] ) {
42 lpEventHandler[eventType] = NULL;
43 rc = 0;
44
45 /* We now sleep until all other CPUs have scheduled. This ensures that
46 * the deletion is seen by all other CPUs, and that the deleted handler
47 * isn't still running on another CPU when we return. */
48 synchronize_rcu();
49 }
50 }
51 return rc;
52}
53EXPORT_SYMBOL(HvLpEvent_registerHandler);
54EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
55
56/* lpIndex is the partition index of the target partition. It is needed
57 * only for VirtualIo, VirtualLan and SessionMgr; zero indicates that
58 * our own partition index should be used (for the other types).
59 */
60int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
61{
62 int rc = 1;
63 if ( eventType < HvLpEvent_Type_NumTypes &&
64 lpEventHandler[eventType] ) {
65 if ( lpIndex == 0 )
66 lpIndex = itLpNaca.xLpIndex;
67 HvCallEvent_openLpEventPath( lpIndex, eventType );
68 ++lpEventHandlerPaths[eventType];
69 rc = 0;
70 }
71 return rc;
72}
73
74int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
75{
76 int rc = 1;
77 if ( eventType < HvLpEvent_Type_NumTypes &&
78 lpEventHandler[eventType] &&
79 lpEventHandlerPaths[eventType] ) {
80 if ( lpIndex == 0 )
81 lpIndex = itLpNaca.xLpIndex;
82 HvCallEvent_closeLpEventPath( lpIndex, eventType );
83 --lpEventHandlerPaths[eventType];
84 rc = 0;
85 }
86 return rc;
87}
88
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
deleted file mode 100644
index 4231861288a3..000000000000
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ /dev/null
@@ -1,262 +0,0 @@
1/*
2 * ItLpQueue.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/stddef.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/bootmem.h>
15#include <linux/seq_file.h>
16#include <linux/proc_fs.h>
17#include <asm/system.h>
18#include <asm/paca.h>
19#include <asm/iSeries/ItLpQueue.h>
20#include <asm/iSeries/HvLpEvent.h>
21#include <asm/iSeries/HvCallEvent.h>
22
23/*
24 * The LpQueue is used to pass event data from the hypervisor to
25 * the partition. This is where I/O interrupt events are communicated.
26 *
27 * It is written to by the hypervisor so cannot end up in the BSS.
28 */
29struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
30
31DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
32
33static char *event_types[HvLpEvent_Type_NumTypes] = {
34 "Hypervisor",
35 "Machine Facilities",
36 "Session Manager",
37 "SPD I/O",
38 "Virtual Bus",
39 "PCI I/O",
40 "RIO I/O",
41 "Virtual Lan",
42 "Virtual I/O"
43};
44
45/* Array of LpEvent handler functions */
46extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
47
48static struct HvLpEvent * get_next_hvlpevent(void)
49{
50 struct HvLpEvent * event;
51 event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
52
53 if (event->xFlags.xValid) {
54 /* rmb() needed only for weakly consistent machines (regatta) */
55 rmb();
56 /* Set pointer to next potential event */
57 hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
58 LpEventAlign) / LpEventAlign) * LpEventAlign;
59
60 /* Wrap to beginning if no room at end */
61 if (hvlpevent_queue.xSlicCurEventPtr >
62 hvlpevent_queue.xSlicLastValidEventPtr) {
63 hvlpevent_queue.xSlicCurEventPtr =
64 hvlpevent_queue.xSlicEventStackPtr;
65 }
66 } else {
67 event = NULL;
68 }
69
70 return event;
71}
72
73static unsigned long spread_lpevents = NR_CPUS;
74
75int hvlpevent_is_pending(void)
76{
77 struct HvLpEvent *next_event;
78
79 if (smp_processor_id() >= spread_lpevents)
80 return 0;
81
82 next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
83
84 return next_event->xFlags.xValid |
85 hvlpevent_queue.xPlicOverflowIntPending;
86}
87
88static void hvlpevent_clear_valid(struct HvLpEvent * event)
89{
90 /* Tell the Hypervisor that we're done with this event.
91 * Also clear bits within this event that might look like valid bits.
92	 * i.e. on 64-byte boundaries.
93 */
94 struct HvLpEvent *tmp;
95 unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
96 LpEventAlign) - 1;
97
98 switch (extra) {
99 case 3:
100 tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
101 tmp->xFlags.xValid = 0;
102 case 2:
103 tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
104 tmp->xFlags.xValid = 0;
105 case 1:
106 tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
107 tmp->xFlags.xValid = 0;
108 }
109
110 mb();
111
112 event->xFlags.xValid = 0;
113}
114
115void process_hvlpevents(struct pt_regs *regs)
116{
117 struct HvLpEvent * event;
118
119 /* If we have recursed, just return */
120 if (!spin_trylock(&hvlpevent_queue.lock))
121 return;
122
123 for (;;) {
124 event = get_next_hvlpevent();
125 if (event) {
126 /* Call appropriate handler here, passing
127 * a pointer to the LpEvent. The handler
128 * must make a copy of the LpEvent if it
129 * needs it in a bottom half. (perhaps for
130 * an ACK)
131 *
132 * Handlers are responsible for ACK processing
133 *
134 * The Hypervisor guarantees that LpEvents will
135 * only be delivered with types that we have
136 * registered for, so no type check is necessary
137 * here!
138 */
139 if (event->xType < HvLpEvent_Type_NumTypes)
140 __get_cpu_var(hvlpevent_counts)[event->xType]++;
141 if (event->xType < HvLpEvent_Type_NumTypes &&
142 lpEventHandler[event->xType])
143 lpEventHandler[event->xType](event, regs);
144 else
145 printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
146
147 hvlpevent_clear_valid(event);
148 } else if (hvlpevent_queue.xPlicOverflowIntPending)
149 /*
150 * No more valid events. If overflow events are
151 * pending process them
152 */
153 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
154 else
155 break;
156 }
157
158 spin_unlock(&hvlpevent_queue.lock);
159}
160
161static int set_spread_lpevents(char *str)
162{
163 unsigned long val = simple_strtoul(str, NULL, 0);
164
165 /*
166 * The parameter is the number of processors to share in processing
167 * lp events.
168 */
169 if (( val > 0) && (val <= NR_CPUS)) {
170 spread_lpevents = val;
171 printk("lpevent processing spread over %ld processors\n", val);
172 } else {
173 printk("invalid spread_lpevents %ld\n", val);
174 }
175
176 return 1;
177}
178__setup("spread_lpevents=", set_spread_lpevents);
179
180void setup_hvlpevent_queue(void)
181{
182 void *eventStack;
183
184 /*
185 * Allocate a page for the Event Stack. The Hypervisor needs the
186 * absolute real address, so we subtract out the KERNELBASE and add
187 * in the absolute real address of the kernel load area.
188 */
189 eventStack = alloc_bootmem_pages(LpEventStackSize);
190 memset(eventStack, 0, LpEventStackSize);
191
192 /* Invoke the hypervisor to initialize the event stack */
193 HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
194
195 hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
196 hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
197 hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
198 (LpEventStackSize - LpEventMaxSize);
199 hvlpevent_queue.xIndex = 0;
200}
201
202static int proc_lpevents_show(struct seq_file *m, void *v)
203{
204 int cpu, i;
205 unsigned long sum;
206 static unsigned long cpu_totals[NR_CPUS];
207
208 /* FIXME: do we care that there's no locking here? */
209 sum = 0;
210 for_each_online_cpu(cpu) {
211 cpu_totals[cpu] = 0;
212 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
213 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
214 }
215 sum += cpu_totals[cpu];
216 }
217
218 seq_printf(m, "LpEventQueue 0\n");
219 seq_printf(m, " events processed:\t%lu\n", sum);
220
221 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
222 sum = 0;
223 for_each_online_cpu(cpu) {
224 sum += per_cpu(hvlpevent_counts, cpu)[i];
225 }
226
227 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
228 }
229
230 seq_printf(m, "\n events processed by processor:\n");
231
232 for_each_online_cpu(cpu) {
233 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
234 }
235
236 return 0;
237}
238
239static int proc_lpevents_open(struct inode *inode, struct file *file)
240{
241 return single_open(file, proc_lpevents_show, NULL);
242}
243
244static struct file_operations proc_lpevents_operations = {
245 .open = proc_lpevents_open,
246 .read = seq_read,
247 .llseek = seq_lseek,
248 .release = single_release,
249};
250
251static int __init proc_lpevents_init(void)
252{
253 struct proc_dir_entry *e;
254
255 e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
256 if (e)
257 e->proc_fops = &proc_lpevents_operations;
258
259 return 0;
260}
261__initcall(proc_lpevents_init);
262
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
deleted file mode 100644
index 0a9c23ca2f0c..000000000000
--- a/arch/ppc64/kernel/LparData.c
+++ /dev/null
@@ -1,227 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/config.h>
10#include <linux/types.h>
11#include <linux/threads.h>
12#include <linux/module.h>
13#include <linux/bitops.h>
14#include <asm/processor.h>
15#include <asm/ptrace.h>
16#include <asm/naca.h>
17#include <asm/abs_addr.h>
18#include <asm/iSeries/ItLpNaca.h>
19#include <asm/lppaca.h>
20#include <asm/iSeries/ItLpRegSave.h>
21#include <asm/paca.h>
22#include <asm/iSeries/HvReleaseData.h>
23#include <asm/iSeries/LparMap.h>
24#include <asm/iSeries/ItVpdAreas.h>
25#include <asm/iSeries/ItIplParmsReal.h>
26#include <asm/iSeries/ItExtVpdPanel.h>
27#include <asm/iSeries/ItLpQueue.h>
28#include <asm/iSeries/IoHriProcessorVpd.h>
29#include <asm/iSeries/ItSpCommArea.h>
30
31
32/* The HvReleaseData is the root of the information shared between
33 * the hypervisor and Linux.
34 */
35struct HvReleaseData hvReleaseData = {
36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
37 .xSize = sizeof(struct HvReleaseData),
38 .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
39 .xSlicNacaAddr = &naca, /* 64-bit Naca address */
40 .xMsNucDataOffset = LPARMAP_PHYS,
41 .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
42 /* 64 bit */
43 /* shared processors */
44 /* HMT allowed */
45 | 6, /* TEMP: This allows non-GA driver */
46 .xVrmIndex = 4, /* We are v5r2m0 */
47 .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
48 .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
49 .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
50 0xa7, 0x40, 0xf2, 0x4b,
51 0xf4, 0x4b, 0xf6, 0xf4 },
52};
53
54/*
55 * The NACA. The first dword of the naca is required by the iSeries
56 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
57 * through the pointer in hvReleaseData.
58 */
59struct naca_struct naca = {
60 .xItVpdAreas = &itVpdAreas,
61 .xRamDisk = 0,
62 .xRamDiskSize = 0,
63};
64
65extern void system_reset_iSeries(void);
66extern void machine_check_iSeries(void);
67extern void data_access_iSeries(void);
68extern void instruction_access_iSeries(void);
69extern void hardware_interrupt_iSeries(void);
70extern void alignment_iSeries(void);
71extern void program_check_iSeries(void);
72extern void fp_unavailable_iSeries(void);
73extern void decrementer_iSeries(void);
74extern void trap_0a_iSeries(void);
75extern void trap_0b_iSeries(void);
76extern void system_call_iSeries(void);
77extern void single_step_iSeries(void);
78extern void trap_0e_iSeries(void);
79extern void performance_monitor_iSeries(void);
80extern void data_access_slb_iSeries(void);
81extern void instruction_access_slb_iSeries(void);
82
83struct ItLpNaca itLpNaca = {
84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
85 .xSize = 0x0400, /* size of ItLpNaca */
86 .xIntHdlrOffset = 0x0300, /* offset to int array */
87 .xMaxIntHdlrEntries = 19, /* # ents */
88 .xPrimaryLpIndex = 0, /* Part # of primary */
89 .xServiceLpIndex = 0, /* Part # of serv */
90 .xLpIndex = 0, /* Part # of me */
91 .xMaxLpQueues = 0, /* # of LP queues */
92 .xLpQueueOffset = 0x100, /* offset of start of LP queues */
93 .xPirEnvironMode = 0, /* Piranha stuff */
94 .xPirConsoleMode = 0,
95 .xPirDasdMode = 0,
96 .xLparInstalled = 0,
97 .xSysPartitioned = 0,
98 .xHwSyncedTBs = 0,
99 .xIntProcUtilHmt = 0,
100 .xSpVpdFormat = 0,
101 .xIntProcRatio = 0,
102 .xPlicVrmIndex = 0, /* VRM index of PLIC */
103 .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
104 .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
105 .xLoadAreaAddr = 0, /* 64-bit addr of load area */
106 .xLoadAreaChunks = 0, /* chunks for load area */
107 .xPaseSysCallCRMask = 0, /* PASE mask */
108 .xSlicSegmentTablePtr = 0, /* seg table */
109 .xOldLpQueue = { 0 }, /* Old LP Queue */
110 .xInterruptHdlr = {
111 (u64)system_reset_iSeries, /* 0x100 System Reset */
112 (u64)machine_check_iSeries, /* 0x200 Machine Check */
113 (u64)data_access_iSeries, /* 0x300 Data Access */
114 (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
115 (u64)hardware_interrupt_iSeries, /* 0x500 External */
116 (u64)alignment_iSeries, /* 0x600 Alignment */
117 (u64)program_check_iSeries, /* 0x700 Program Check */
118 (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
119 (u64)decrementer_iSeries, /* 0x900 Decrementer */
120 (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
121 (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
122 (u64)system_call_iSeries, /* 0xc00 System Call */
123 (u64)single_step_iSeries, /* 0xd00 Single Step */
124 (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
125 (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
126 0, /* int 0x1000 */
127 0, /* int 0x1010 */
128 0, /* int 0x1020 CPU ctls */
129 (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
130 (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
131 (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
132 }
133};
134EXPORT_SYMBOL(itLpNaca);
135
136/* May be filled in by the hypervisor so cannot end up in the BSS */
137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
138
139/* May be filled in by the hypervisor so cannot end up in the BSS */
140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
141EXPORT_SYMBOL(xItExtVpdPanel);
142
143#define maxPhysicalProcessors 32
144
145struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
146 {
147 .xInstCacheOperandSize = 32,
148 .xDataCacheOperandSize = 32,
149 .xProcFreq = 50000000,
150 .xTimeBaseFreq = 50000000,
151 .xPVR = 0x3600
152 }
153};
154
155/* Space for Main Store Vpd 27,200 bytes */
156/* May be filled in by the hypervisor so cannot end up in the BSS */
157u64 xMsVpd[3400] __attribute__((__section__(".data")));
158
159/* Space for Recovery Log Buffer */
160/* May be filled in by the hypervisor so cannot end up in the BSS */
161u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
162
163struct SpCommArea xSpCommArea = {
164 .xDesc = 0xE2D7C3C2,
165 .xFormat = 1,
166};
167
168/* The LparMap data is now located at offset 0x6000 in head.S
169 * It was put there so that the HvReleaseData could address it
170 * with a 32-bit offset as required by the iSeries hypervisor
171 *
172 * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
173 * the Naca via the HvReleaseData area. The HvReleaseData has the
174 * offset into the Naca of the pointer to the ItVpdAreas.
175 */
176struct ItVpdAreas itVpdAreas = {
177 .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
178 .xSlicSize = sizeof(struct ItVpdAreas),
179 .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
180 .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
181 .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
182 .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
183 .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
184 .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
185 .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
186 .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
187 .xSlicMaxSlotLabels = 0, /* max slot labels */
188 .xSlicMaxLpQueues = 1, /* max LP queues */
189 .xPlicDmaLens = { 0 }, /* DMA lengths */
190 .xPlicDmaToks = { 0 }, /* DMA tokens */
191 .xSlicVpdLens = { /* VPD lengths */
192 0,0,0, /* 0 - 2 */
193 sizeof(xItExtVpdPanel), /* 3 Extended VPD */
194 sizeof(struct paca_struct), /* 4 length of Paca */
195 0, /* 5 */
196 sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
197 26992, /* 7 length of MS VPD */
198 0, /* 8 */
199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
200 0, /* 10 */
201 256, /* 11 length of Recovery Log Buf */
202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
203 0,0,0, /* 13 - 15 */
204 sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
205 0,0,0,0,0,0, /* 17 - 22 */
206 sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
207 0,0 /* 24 - 25 */
208 },
209 .xSlicVpdAdrs = { /* VPD addresses */
210 0,0,0, /* 0 - 2 */
211 &xItExtVpdPanel, /* 3 Extended VPD */
212 &paca[0], /* 4 first Paca */
213 0, /* 5 */
214 &xItIplParmsReal, /* 6 IPL parms */
215 &xMsVpd, /* 7 MS Vpd */
216 0, /* 8 */
217 &itLpNaca, /* 9 LpNaca */
218 0, /* 10 */
219 &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
220 &xSpCommArea, /* 12 SP Comm Area */
221 0,0,0, /* 13 - 15 */
222 &xIoHriProcessorVpd, /* 16 Proc Vpd */
223 0,0,0,0,0,0, /* 17 - 22 */
224 &hvlpevent_queue, /* 23 Lp Queue */
225 0,0
226 }
227};
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index ae60eb1193c6..327c08ce4291 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,36 +2,34 @@
2# Makefile for the linux ppc64 kernel. 2# Makefile for the linux ppc64 kernel.
3# 3#
4 4
5ifneq ($(CONFIG_PPC_MERGE),y)
6
5EXTRA_CFLAGS += -mno-minimal-toc 7EXTRA_CFLAGS += -mno-minimal-toc
6extra-y := head.o vmlinux.lds 8extra-y := head.o vmlinux.lds
7 9
8obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \ 10obj-y := misc.o prom.o
9 time.o process.o signal.o syscalls.o misc.o ptrace.o \ 11
10 align.o semaphore.o bitops.o pacaData.o \ 12endif
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/
16 13
17obj-$(CONFIG_PPC_OF) += of_device.o 14obj-y += irq.o idle.o dma.o \
15 signal.o \
16 align.o bitops.o pacaData.o \
17 udbg.o ioctl32.o \
18 rtc.o \
19 cpu_setup_power4.o \
20 iommu.o sysfs.o vdso.o firmware.o
21obj-y += vdso32/ vdso64/
18 22
19pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \
20 iSeries_VpdInfo.o
21pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 23pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
22 24
23obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) 25obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
24 26
25obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \ 27obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
26 iSeries_setup.o ItLpQueue.o hvCall.o \ 28ifneq ($(CONFIG_PPC_MERGE),y)
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ 29obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
28 iSeries_iommu.o 30endif
29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 pSeries_setup.o pSeries_iommu.o udbg_16550.o
35 33
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ 34obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o 35 bpa_iic.o spider-pic.o
@@ -41,45 +39,36 @@ obj-$(CONFIG_EEH) += eeh.o
41obj-$(CONFIG_PROC_FS) += proc_ppc64.o 39obj-$(CONFIG_PROC_FS) += proc_ppc64.o
42obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 40obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
43obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
44obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 42obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o 43ifneq ($(CONFIG_PPC_MERGE),y)
44obj-$(CONFIG_MODULES) += ppc_ksyms.o
45endif
46obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
46obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 47obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
47obj-$(CONFIG_SCANLOG) += scanlog.o 48obj-$(CONFIG_SCANLOG) += scanlog.o
48obj-$(CONFIG_VIOPATH) += viopath.o
49obj-$(CONFIG_LPARCFG) += lparcfg.o 49obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51ifneq ($(CONFIG_PPC_MERGE),y)
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 52obj-$(CONFIG_BOOTX_TEXT) += btext.o
53endif
52obj-$(CONFIG_HVCS) += hvcserver.o 54obj-$(CONFIG_HVCS) += hvcserver.o
53 55
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o 56obj-$(CONFIG_PPC_PMAC) += udbg_scc.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
57obj-$(CONFIG_XICS) += xics.o
58obj-$(CONFIG_MPIC) += mpic.o
59 57
60obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 58obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
61 pmac_time.o pmac_nvram.o pmac_low_i2c.o \
62 udbg_scc.o
63
64obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
65 udbg_16550.o
66
67obj-$(CONFIG_U3_DART) += u3_iommu.o
68 59
69ifdef CONFIG_SMP 60ifdef CONFIG_SMP
70obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 61obj-$(CONFIG_PPC_PMAC) += smp-tbsync.o
71obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
72obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
73obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
74obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 62obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
75endif 63endif
76 64
77obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
78obj-$(CONFIG_KPROBES) += kprobes.o 65obj-$(CONFIG_KPROBES) += kprobes.o
79 66
80CFLAGS_ioctl32.o += -Ifs/ 67CFLAGS_ioctl32.o += -Ifs/
81 68
69ifneq ($(CONFIG_PPC_MERGE),y)
82ifeq ($(CONFIG_PPC_ISERIES),y) 70ifeq ($(CONFIG_PPC_ISERIES),y)
83arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s 71arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
84AFLAGS_head.o += -Iarch/ppc64/kernel 72AFLAGS_head.o += -Iarch/powerpc/kernel
73endif
85endif 74endif
diff --git a/arch/ppc64/kernel/align.c b/arch/ppc64/kernel/align.c
index 330e7ef81427..256d5b592aa1 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
313 /* Doing stfs, have to convert to single */ 313 /* Doing stfs, have to convert to single */
314 preempt_disable(); 314 preempt_disable();
315 enable_kernel_fp(); 315 enable_kernel_fp();
316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr); 316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
317 disable_kernel_fp(); 317 disable_kernel_fp();
318 preempt_enable(); 318 preempt_enable();
319 } 319 }
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
349 /* Doing lfs, have to convert to double */ 349 /* Doing lfs, have to convert to double */
350 preempt_disable(); 350 preempt_disable();
351 enable_kernel_fp(); 351 enable_kernel_fp();
352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr); 352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
353 disable_kernel_fp(); 353 disable_kernel_fp();
354 preempt_enable(); 354 preempt_enable();
355 } 355 }
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 1ff4fa05a973..5e6046cb414e 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -46,8 +46,6 @@
46int main(void) 46int main(void)
47{ 47{
48 /* thread struct on stack */ 48 /* thread struct on stack */
49 DEFINE(THREAD_SHIFT, THREAD_SHIFT);
50 DEFINE(THREAD_SIZE, THREAD_SIZE);
51 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 49 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
52 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 50 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
53 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); 51 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
@@ -77,6 +75,7 @@ int main(void)
77 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 75 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
78 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 76 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
79 DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); 77 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
78 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
80 79
81 /* paca */ 80 /* paca */
82 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 81 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
diff --git a/arch/ppc64/kernel/binfmt_elf32.c b/arch/ppc64/kernel/binfmt_elf32.c
deleted file mode 100644
index fadc699a0497..000000000000
--- a/arch/ppc64/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * binfmt_elf32.c: Support 32-bit PPC ELF binaries on Power3 and follow-ons.
3 * based on the SPARC64 version.
4 * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
5 * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
6 *
7 * Copyright (C) 2000,2001 Ken Aaker (kdaaker@rchland.vnet.ibm.com), IBM Corp
8 * Copyright (C) 2001 Anton Blanchard (anton@au.ibm.com), IBM
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#define ELF_ARCH EM_PPC
17#define ELF_CLASS ELFCLASS32
18#define ELF_DATA ELFDATA2MSB;
19
20#include <asm/processor.h>
21#include <linux/module.h>
22#include <linux/config.h>
23#include <linux/elfcore.h>
24#include <linux/compat.h>
25
26#define elf_prstatus elf_prstatus32
27struct elf_prstatus32
28{
29 struct elf_siginfo pr_info; /* Info associated with signal */
30 short pr_cursig; /* Current signal */
31 unsigned int pr_sigpend; /* Set of pending signals */
32 unsigned int pr_sighold; /* Set of held signals */
33 pid_t pr_pid;
34 pid_t pr_ppid;
35 pid_t pr_pgrp;
36 pid_t pr_sid;
37 struct compat_timeval pr_utime; /* User time */
38 struct compat_timeval pr_stime; /* System time */
39 struct compat_timeval pr_cutime; /* Cumulative user time */
40 struct compat_timeval pr_cstime; /* Cumulative system time */
41 elf_gregset_t pr_reg; /* General purpose registers. */
42 int pr_fpvalid; /* True if math co-processor being used. */
43};
44
45#define elf_prpsinfo elf_prpsinfo32
46struct elf_prpsinfo32
47{
48 char pr_state; /* numeric process state */
49 char pr_sname; /* char for pr_state */
50 char pr_zomb; /* zombie */
51 char pr_nice; /* nice val */
52 unsigned int pr_flag; /* flags */
53 u32 pr_uid;
54 u32 pr_gid;
55 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
56 /* Lots missing */
57 char pr_fname[16]; /* filename of executable */
58 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
59};
60
61#include <linux/time.h>
62
63#undef cputime_to_timeval
64#define cputime_to_timeval cputime_to_compat_timeval
65static __inline__ void
66cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
67{
68 unsigned long jiffies = cputime_to_jiffies(cputime);
69 value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
70 value->tv_sec = jiffies / HZ;
71}
72
73extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
74#undef start_thread
75#define start_thread start_thread32
76#define init_elf_binfmt init_elf32_binfmt
77
78#include "../../../fs/binfmt_elf.c"
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index 5f2460090e03..da1b4b7a3269 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
39#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h> 40#include <asm/abs_addr.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h>
42 43
43#include "pci.h"
44#include "bpa_iommu.h" 44#include "bpa_iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db66f458..c2dc8f282eb8 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,9 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/nvram.h> 44#include <asm/nvram.h>
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
47#include <asm/irq.h>
46 48
47#include "pci.h"
48#include "bpa_iic.h" 49#include "bpa_iic.h"
49#include "bpa_iommu.h" 50#include "bpa_iommu.h"
50 51
@@ -54,7 +55,7 @@
54#define DBG(fmt...) 55#define DBG(fmt...)
55#endif 56#endif
56 57
57void bpa_get_cpuinfo(struct seq_file *m) 58void bpa_show_cpuinfo(struct seq_file *m)
58{ 59{
59 struct device_node *root; 60 struct device_node *root;
60 const char *model = ""; 61 const char *model = "";
@@ -128,7 +129,7 @@ struct machdep_calls __initdata bpa_md = {
128 .probe = bpa_probe, 129 .probe = bpa_probe,
129 .setup_arch = bpa_setup_arch, 130 .setup_arch = bpa_setup_arch,
130 .init_early = bpa_init_early, 131 .init_early = bpa_init_early,
131 .get_cpuinfo = bpa_get_cpuinfo, 132 .show_cpuinfo = bpa_show_cpuinfo,
132 .restart = rtas_restart, 133 .restart = rtas_restart,
133 .power_off = rtas_power_off, 134 .power_off = rtas_power_off,
134 .halt = rtas_halt, 135 .halt = rtas_halt,
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
index b6fbfbe9032d..506a37885c5c 100644
--- a/arch/ppc64/kernel/btext.c
+++ b/arch/ppc64/kernel/btext.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/lmb.h> 19#include <asm/lmb.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21#include <asm/udbg.h>
21 22
22#undef NO_SCROLL 23#undef NO_SCROLL
23 24
@@ -131,6 +132,47 @@ int btext_initialize(struct device_node *np)
131 return 0; 132 return 0;
132} 133}
133 134
135static void btext_putc(unsigned char c)
136{
137 btext_drawchar(c);
138}
139
140void __init init_boot_display(void)
141{
142 char *name;
143 struct device_node *np = NULL;
144 int rc = -ENODEV;
145
146 printk("trying to initialize btext ...\n");
147
148 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
149 if (name != NULL) {
150 np = of_find_node_by_path(name);
151 if (np != NULL) {
152 if (strcmp(np->type, "display") != 0) {
153 printk("boot stdout isn't a display !\n");
154 of_node_put(np);
155 np = NULL;
156 }
157 }
158 }
159 if (np)
160 rc = btext_initialize(np);
161 if (rc) {
162 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
163 if (get_property(np, "linux,opened", NULL)) {
164 printk("trying %s ...\n", np->full_name);
165 rc = btext_initialize(np);
166 printk("result: %d\n", rc);
167 }
168 if (rc == 0)
169 break;
170 }
171 }
172 if (rc == 0 && udbg_putc == NULL)
173 udbg_putc = btext_putc;
174}
175
134 176
135/* Calc the base address of a given point (x,y) */ 177/* Calc the base address of a given point (x,y) */
136static unsigned char * calc_base(int x, int y) 178static unsigned char * calc_base(int x, int y)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
deleted file mode 100644
index 8831a28c3c4e..000000000000
--- a/arch/ppc64/kernel/cputable.c
+++ /dev/null
@@ -1,308 +0,0 @@
1/*
2 * arch/ppc64/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/threads.h>
19#include <linux/init.h>
20#include <linux/module.h>
21
22#include <asm/oprofile_impl.h>
23#include <asm/cputable.h>
24
25struct cpu_spec* cur_cpu_spec = NULL;
26EXPORT_SYMBOL(cur_cpu_spec);
27
28/* NOTE:
29 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
30 * the responsibility of the appropriate CPU save/restore functions to
31 * eventually copy these settings over. Those save/restore aren't yet
32 * part of the cputable though. That has to be fixed for both ppc32
33 * and ppc64
34 */
35extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
39
40
41/* We only set the altivec features if the kernel was compiled with altivec
42 * support
43 */
44#ifdef CONFIG_ALTIVEC
45#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
46#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
47#else
48#define CPU_FTR_ALTIVEC_COMP 0
49#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
50#endif
51
52struct cpu_spec cpu_specs[] = {
53 { /* Power3 */
54 .pvr_mask = 0xffff0000,
55 .pvr_value = 0x00400000,
56 .cpu_name = "POWER3 (630)",
57 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
58 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
59 .cpu_user_features = COMMON_USER_PPC64,
60 .icache_bsize = 128,
61 .dcache_bsize = 128,
62 .num_pmcs = 8,
63 .cpu_setup = __setup_cpu_power3,
64#ifdef CONFIG_OPROFILE
65 .oprofile_cpu_type = "ppc64/power3",
66 .oprofile_model = &op_model_rs64,
67#endif
68 },
69 { /* Power3+ */
70 .pvr_mask = 0xffff0000,
71 .pvr_value = 0x00410000,
72 .cpu_name = "POWER3 (630+)",
73 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
74 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
75 .cpu_user_features = COMMON_USER_PPC64,
76 .icache_bsize = 128,
77 .dcache_bsize = 128,
78 .num_pmcs = 8,
79 .cpu_setup = __setup_cpu_power3,
80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
83#endif
84 },
85 { /* Northstar */
86 .pvr_mask = 0xffff0000,
87 .pvr_value = 0x00330000,
88 .cpu_name = "RS64-II (northstar)",
89 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
90 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
91 CPU_FTR_MMCRA | CPU_FTR_CTRL,
92 .cpu_user_features = COMMON_USER_PPC64,
93 .icache_bsize = 128,
94 .dcache_bsize = 128,
95 .num_pmcs = 8,
96 .cpu_setup = __setup_cpu_power3,
97#ifdef CONFIG_OPROFILE
98 .oprofile_cpu_type = "ppc64/rs64",
99 .oprofile_model = &op_model_rs64,
100#endif
101 },
102 { /* Pulsar */
103 .pvr_mask = 0xffff0000,
104 .pvr_value = 0x00340000,
105 .cpu_name = "RS64-III (pulsar)",
106 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
107 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
108 CPU_FTR_MMCRA | CPU_FTR_CTRL,
109 .cpu_user_features = COMMON_USER_PPC64,
110 .icache_bsize = 128,
111 .dcache_bsize = 128,
112 .num_pmcs = 8,
113 .cpu_setup = __setup_cpu_power3,
114#ifdef CONFIG_OPROFILE
115 .oprofile_cpu_type = "ppc64/rs64",
116 .oprofile_model = &op_model_rs64,
117#endif
118 },
119 { /* I-star */
120 .pvr_mask = 0xffff0000,
121 .pvr_value = 0x00360000,
122 .cpu_name = "RS64-III (icestar)",
123 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
124 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
125 CPU_FTR_MMCRA | CPU_FTR_CTRL,
126 .cpu_user_features = COMMON_USER_PPC64,
127 .icache_bsize = 128,
128 .dcache_bsize = 128,
129 .num_pmcs = 8,
130 .cpu_setup = __setup_cpu_power3,
131#ifdef CONFIG_OPROFILE
132 .oprofile_cpu_type = "ppc64/rs64",
133 .oprofile_model = &op_model_rs64,
134#endif
135 },
136 { /* S-star */
137 .pvr_mask = 0xffff0000,
138 .pvr_value = 0x00370000,
139 .cpu_name = "RS64-IV (sstar)",
140 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
141 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
142 CPU_FTR_MMCRA | CPU_FTR_CTRL,
143 .cpu_user_features = COMMON_USER_PPC64,
144 .icache_bsize = 128,
145 .dcache_bsize = 128,
146 .num_pmcs = 8,
147 .cpu_setup = __setup_cpu_power3,
148#ifdef CONFIG_OPROFILE
149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_model = &op_model_rs64,
151#endif
152 },
153 { /* Power4 */
154 .pvr_mask = 0xffff0000,
155 .pvr_value = 0x00350000,
156 .cpu_name = "POWER4 (gp)",
157 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
158 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
159 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
160 .cpu_user_features = COMMON_USER_PPC64,
161 .icache_bsize = 128,
162 .dcache_bsize = 128,
163 .num_pmcs = 8,
164 .cpu_setup = __setup_cpu_power4,
165#ifdef CONFIG_OPROFILE
166 .oprofile_cpu_type = "ppc64/power4",
167 .oprofile_model = &op_model_rs64,
168#endif
169 },
170 { /* Power4+ */
171 .pvr_mask = 0xffff0000,
172 .pvr_value = 0x00380000,
173 .cpu_name = "POWER4+ (gq)",
174 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
176 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
177 .cpu_user_features = COMMON_USER_PPC64,
178 .icache_bsize = 128,
179 .dcache_bsize = 128,
180 .num_pmcs = 8,
181 .cpu_setup = __setup_cpu_power4,
182#ifdef CONFIG_OPROFILE
183 .oprofile_cpu_type = "ppc64/power4",
184 .oprofile_model = &op_model_power4,
185#endif
186 },
187 { /* PPC970 */
188 .pvr_mask = 0xffff0000,
189 .pvr_value = 0x00390000,
190 .cpu_name = "PPC970",
191 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
192 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
193 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
194 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
195 .cpu_user_features = COMMON_USER_PPC64 |
196 PPC_FEATURE_HAS_ALTIVEC_COMP,
197 .icache_bsize = 128,
198 .dcache_bsize = 128,
199 .num_pmcs = 8,
200 .cpu_setup = __setup_cpu_ppc970,
201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
205 },
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
211 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
212 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
213 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
214 .cpu_user_features = COMMON_USER_PPC64 |
215 PPC_FEATURE_HAS_ALTIVEC_COMP,
216 .icache_bsize = 128,
217 .dcache_bsize = 128,
218 .num_pmcs = 8,
219 .cpu_setup = __setup_cpu_ppc970,
220#ifdef CONFIG_OPROFILE
221 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_model = &op_model_power4,
223#endif
224 },
225 { /* PPC970MP */
226 .pvr_mask = 0xffff0000,
227 .pvr_value = 0x00440000,
228 .cpu_name = "PPC970MP",
229 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
231 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
249 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
250 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
251 CPU_FTR_MMCRA_SIHV,
252 .cpu_user_features = COMMON_USER_PPC64,
253 .icache_bsize = 128,
254 .dcache_bsize = 128,
255 .num_pmcs = 6,
256 .cpu_setup = __setup_cpu_power4,
257#ifdef CONFIG_OPROFILE
258 .oprofile_cpu_type = "ppc64/power5",
259 .oprofile_model = &op_model_power4,
260#endif
261 },
262 { /* Power5 */
263 .pvr_mask = 0xffff0000,
264 .pvr_value = 0x003b0000,
265 .cpu_name = "POWER5 (gs)",
266 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
267 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
268 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
269 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
270 CPU_FTR_MMCRA_SIHV,
271 .cpu_user_features = COMMON_USER_PPC64,
272 .icache_bsize = 128,
273 .dcache_bsize = 128,
274 .num_pmcs = 6,
275 .cpu_setup = __setup_cpu_power4,
276#ifdef CONFIG_OPROFILE
277 .oprofile_cpu_type = "ppc64/power5",
278 .oprofile_model = &op_model_power4,
279#endif
280 },
281 { /* BE DD1.x */
282 .pvr_mask = 0xffff0000,
283 .pvr_value = 0x00700000,
284 .cpu_name = "Broadband Engine",
285 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
286 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
287 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
288 CPU_FTR_SMT,
289 .cpu_user_features = COMMON_USER_PPC64 |
290 PPC_FEATURE_HAS_ALTIVEC_COMP,
291 .icache_bsize = 128,
292 .dcache_bsize = 128,
293 .cpu_setup = __setup_cpu_be,
294 },
295 { /* default match */
296 .pvr_mask = 0x00000000,
297 .pvr_value = 0x00000000,
298 .cpu_name = "POWER4 (compatible)",
299 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
300 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
301 CPU_FTR_PPCAS_ARCH_V2,
302 .cpu_user_features = COMMON_USER_PPC64,
303 .icache_bsize = 128,
304 .dcache_bsize = 128,
305 .num_pmcs = 6,
306 .cpu_setup = __setup_cpu_power4,
307 }
308};
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd731222..035d1b14a207 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/systemcfg.h> 35#include <asm/systemcfg.h>
36#include "pci.h" 36#include <asm/ppc-pci.h>
37 37
38#undef DEBUG 38#undef DEBUG
39 39
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
deleted file mode 100644
index e8c0bbf4d000..000000000000
--- a/arch/ppc64/kernel/entry.S
+++ /dev/null
@@ -1,845 +0,0 @@
1/*
2 * arch/ppc64/kernel/entry.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the system call entry code, context switch
15 * code, and exception/interrupt return code for PowerPC.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 */
22
23#include <linux/config.h>
24#include <linux/errno.h>
25#include <asm/unistd.h>
26#include <asm/processor.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/cputable.h>
33
34#ifdef CONFIG_PPC_ISERIES
35#define DO_SOFT_DISABLE
36#endif
37
38/*
39 * System calls.
40 */
41 .section ".toc","aw"
42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table
44
45.SYS_CALL_TABLE32:
46 .tc .sys_call_table32[TC],.sys_call_table32
47
48/* This value is used to mark exception frames on the stack. */
49exception_marker:
50 .tc ID_72656773_68657265[TC],0x7265677368657265
51
52 .section ".text"
53 .align 7
54
55#undef SHOW_SYSCALLS
56
57 .globl system_call_common
58system_call_common:
59 andi. r10,r12,MSR_PR
60 mr r10,r1
61 addi r1,r1,-INT_FRAME_SIZE
62 beq- 1f
63 ld r1,PACAKSAVE(r13)
641: std r10,0(r1)
65 std r11,_NIP(r1)
66 std r12,_MSR(r1)
67 std r0,GPR0(r1)
68 std r10,GPR1(r1)
69 std r2,GPR2(r1)
70 std r3,GPR3(r1)
71 std r4,GPR4(r1)
72 std r5,GPR5(r1)
73 std r6,GPR6(r1)
74 std r7,GPR7(r1)
75 std r8,GPR8(r1)
76 li r11,0
77 std r11,GPR9(r1)
78 std r11,GPR10(r1)
79 std r11,GPR11(r1)
80 std r11,GPR12(r1)
81 std r9,GPR13(r1)
82 crclr so
83 mfcr r9
84 mflr r10
85 li r11,0xc01
86 std r9,_CCR(r1)
87 std r10,_LINK(r1)
88 std r11,_TRAP(r1)
89 mfxer r9
90 mfctr r10
91 std r9,_XER(r1)
92 std r10,_CTR(r1)
93 std r3,ORIG_GPR3(r1)
94 ld r2,PACATOC(r13)
95 addi r9,r1,STACK_FRAME_OVERHEAD
96 ld r11,exception_marker@toc(r2)
97 std r11,-16(r9) /* "regshere" marker */
98#ifdef CONFIG_PPC_ISERIES
99 /* Hack for handling interrupts when soft-enabling on iSeries */
100 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
101 andi. r10,r12,MSR_PR /* from kernel */
102 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
103 beq hardware_interrupt_entry
104 lbz r10,PACAPROCENABLED(r13)
105 std r10,SOFTE(r1)
106#endif
107 mfmsr r11
108 ori r11,r11,MSR_EE
109 mtmsrd r11,1
110
111#ifdef SHOW_SYSCALLS
112 bl .do_show_syscall
113 REST_GPR(0,r1)
114 REST_4GPRS(3,r1)
115 REST_2GPRS(7,r1)
116 addi r9,r1,STACK_FRAME_OVERHEAD
117#endif
118 clrrdi r11,r1,THREAD_SHIFT
119 li r12,0
120 ld r10,TI_FLAGS(r11)
121 stb r12,TI_SC_NOERR(r11)
122 andi. r11,r10,_TIF_SYSCALL_T_OR_A
123 bne- syscall_dotrace
124syscall_dotrace_cont:
125 cmpldi 0,r0,NR_syscalls
126 bge- syscall_enosys
127
128system_call: /* label this so stack traces look sane */
129/*
130 * Need to vector to 32 Bit or default sys_call_table here,
131 * based on caller's run-mode / personality.
132 */
133 ld r11,.SYS_CALL_TABLE@toc(2)
134 andi. r10,r10,_TIF_32BIT
135 beq 15f
136 ld r11,.SYS_CALL_TABLE32@toc(2)
137 clrldi r3,r3,32
138 clrldi r4,r4,32
139 clrldi r5,r5,32
140 clrldi r6,r6,32
141 clrldi r7,r7,32
142 clrldi r8,r8,32
14315:
144 slwi r0,r0,3
145 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
146 mtctr r10
147 bctrl /* Call handler */
148
149syscall_exit:
150#ifdef SHOW_SYSCALLS
151 std r3,GPR3(r1)
152 bl .do_show_syscall_exit
153 ld r3,GPR3(r1)
154#endif
155 std r3,RESULT(r1)
156 ld r5,_CCR(r1)
157 li r10,-_LAST_ERRNO
158 cmpld r3,r10
159 clrrdi r12,r1,THREAD_SHIFT
160 bge- syscall_error
161syscall_error_cont:
162
163 /* check for syscall tracing or audit */
164 ld r9,TI_FLAGS(r12)
165 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
166 bne- syscall_exit_trace
167syscall_exit_trace_cont:
168
169 /* disable interrupts so current_thread_info()->flags can't change,
170 and so that we don't get interrupted after loading SRR0/1. */
171 ld r8,_MSR(r1)
172 andi. r10,r8,MSR_RI
173 beq- unrecov_restore
174 mfmsr r10
175 rldicl r10,r10,48,1
176 rotldi r10,r10,16
177 mtmsrd r10,1
178 ld r9,TI_FLAGS(r12)
179 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
180 bne- syscall_exit_work
181 ld r7,_NIP(r1)
182 stdcx. r0,0,r1 /* to clear the reservation */
183 andi. r6,r8,MSR_PR
184 ld r4,_LINK(r1)
185 beq- 1f /* only restore r13 if */
186 ld r13,GPR13(r1) /* returning to usermode */
1871: ld r2,GPR2(r1)
188 li r12,MSR_RI
189 andc r10,r10,r12
190 mtmsrd r10,1 /* clear MSR.RI */
191 ld r1,GPR1(r1)
192 mtlr r4
193 mtcr r5
194 mtspr SRR0,r7
195 mtspr SRR1,r8
196 rfid
197 b . /* prevent speculative execution */
198
199syscall_enosys:
200 li r3,-ENOSYS
201 std r3,RESULT(r1)
202 clrrdi r12,r1,THREAD_SHIFT
203 ld r5,_CCR(r1)
204
205syscall_error:
206 lbz r11,TI_SC_NOERR(r12)
207 cmpwi 0,r11,0
208 bne- syscall_error_cont
209 neg r3,r3
210 oris r5,r5,0x1000 /* Set SO bit in CR */
211 std r5,_CCR(r1)
212 b syscall_error_cont
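
The error path above encodes the ppc64 syscall return convention: on failure the kernel negates r3 back to a positive errno value and sets the summary-overflow (SO) bit in CR0, rather than returning -errno in-band as some ports do. A hedged C sketch of how a libc-side wrapper would decode that pair (r3 and cr0_so stand for the register and condition bit a real wrapper reads straight out of the trap return):

    #include <errno.h>

    /* Sketch only, not a real libc routine. */
    static long decode_syscall_return(long r3, int cr0_so)
    {
            if (cr0_so) {           /* SO set by "oris r5,r5,0x1000" above */
                    errno = (int)r3;    /* r3 was already negated to +errno */
                    return -1;
            }
            return r3;              /* success: result passed through */
    }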
213
214/* Traced system call support */
215syscall_dotrace:
216 bl .save_nvgprs
217 addi r3,r1,STACK_FRAME_OVERHEAD
218 bl .do_syscall_trace_enter
219 ld r0,GPR0(r1) /* Restore original registers */
220 ld r3,GPR3(r1)
221 ld r4,GPR4(r1)
222 ld r5,GPR5(r1)
223 ld r6,GPR6(r1)
224 ld r7,GPR7(r1)
225 ld r8,GPR8(r1)
226 addi r9,r1,STACK_FRAME_OVERHEAD
227 clrrdi r10,r1,THREAD_SHIFT
228 ld r10,TI_FLAGS(r10)
229 b syscall_dotrace_cont
230
231syscall_exit_trace:
232 std r3,GPR3(r1)
233 bl .save_nvgprs
234 addi r3,r1,STACK_FRAME_OVERHEAD
235 bl .do_syscall_trace_leave
236 REST_NVGPRS(r1)
237 ld r3,GPR3(r1)
238 ld r5,_CCR(r1)
239 clrrdi r12,r1,THREAD_SHIFT
240 b syscall_exit_trace_cont
241
242/* Stuff to do on exit from a system call. */
243syscall_exit_work:
244 std r3,GPR3(r1)
245 std r5,_CCR(r1)
246 b .ret_from_except_lite
247
248/* Save non-volatile GPRs, if not already saved. */
249_GLOBAL(save_nvgprs)
250 ld r11,_TRAP(r1)
251 andi. r0,r11,1
252 beqlr-
253 SAVE_NVGPRS(r1)
254 clrrdi r0,r11,1
255 std r0,_TRAP(r1)
256 blr
257
258/*
259 * The sigsuspend and rt_sigsuspend system calls can call do_signal
260 * and thus put the process into the stopped state where we might
261 * want to examine its user state with ptrace. Therefore we need
262 * to save all the nonvolatile registers (r14 - r31) before calling
263 * the C code. Similarly, fork, vfork and clone need the full
264 * register state on the stack so that it can be copied to the child.
265 */
266_GLOBAL(ppc32_sigsuspend)
267 bl .save_nvgprs
268 bl .sys32_sigsuspend
269 b 70f
270
271_GLOBAL(ppc64_rt_sigsuspend)
272 bl .save_nvgprs
273 bl .sys_rt_sigsuspend
274 b 70f
275
276_GLOBAL(ppc32_rt_sigsuspend)
277 bl .save_nvgprs
278 bl .sys32_rt_sigsuspend
27970: cmpdi 0,r3,0
280 /* If it returned an error, we need to return via syscall_exit to set
281 the SO bit in cr0 and potentially stop for ptrace. */
282 bne syscall_exit
283 /* If sigsuspend() returns zero, we are going into a signal handler. We
284 may need to call audit_syscall_exit() to mark the exit from sigsuspend() */
285#ifdef CONFIG_AUDIT
286 ld r3,PACACURRENT(r13)
287 ld r4,AUDITCONTEXT(r3)
288 cmpdi 0,r4,0
289 beq .ret_from_except /* No audit_context: Leave immediately. */
290 li r4, 2 /* AUDITSC_FAILURE */
291 li r5,-4 /* It's always -EINTR */
292 bl .audit_syscall_exit
293#endif
294 b .ret_from_except
295
296_GLOBAL(ppc_fork)
297 bl .save_nvgprs
298 bl .sys_fork
299 b syscall_exit
300
301_GLOBAL(ppc_vfork)
302 bl .save_nvgprs
303 bl .sys_vfork
304 b syscall_exit
305
306_GLOBAL(ppc_clone)
307 bl .save_nvgprs
308 bl .sys_clone
309 b syscall_exit
310
311_GLOBAL(ppc32_swapcontext)
312 bl .save_nvgprs
313 bl .sys32_swapcontext
314 b 80f
315
316_GLOBAL(ppc64_swapcontext)
317 bl .save_nvgprs
318 bl .sys_swapcontext
319 b 80f
320
321_GLOBAL(ppc32_sigreturn)
322 bl .sys32_sigreturn
323 b 80f
324
325_GLOBAL(ppc32_rt_sigreturn)
326 bl .sys32_rt_sigreturn
327 b 80f
328
329_GLOBAL(ppc64_rt_sigreturn)
330 bl .sys_rt_sigreturn
331
33280: cmpdi 0,r3,0
333 blt syscall_exit
334 clrrdi r4,r1,THREAD_SHIFT
335 ld r4,TI_FLAGS(r4)
336 andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
337 beq+ 81f
338 addi r3,r1,STACK_FRAME_OVERHEAD
339 bl .do_syscall_trace_leave
34081: b .ret_from_except
341
342_GLOBAL(ret_from_fork)
343 bl .schedule_tail
344 REST_NVGPRS(r1)
345 li r3,0
346 b syscall_exit
347
348/*
349 * This routine switches between two different tasks. The process
350 * state of one is saved on its kernel stack. Then the state
351 * of the other is restored from its kernel stack. The memory
352 * management hardware is updated to the second process's state.
353 * Finally, we can return to the second process, via ret_from_except.
354 * On entry, r3 points to the THREAD for the current task, r4
355 * points to the THREAD for the new task.
356 *
357 * Note: there are two ways to get to the "going out" portion
358 * of this code; either by coming in via the entry (_switch)
359 * or via "fork" which must set up an environment equivalent
360 * to the "_switch" path. If you change this you'll have to change
361 * the fork code also.
362 *
363 * The code which creates the new task context is in 'copy_thread'
364 * in arch/ppc64/kernel/process.c
365 */
366 .align 7
367_GLOBAL(_switch)
368 mflr r0
369 std r0,16(r1)
370 stdu r1,-SWITCH_FRAME_SIZE(r1)
371 /* r3-r13 are caller saved -- Cort */
372 SAVE_8GPRS(14, r1)
373 SAVE_10GPRS(22, r1)
374 mflr r20 /* Return to switch caller */
375 mfmsr r22
376 li r0, MSR_FP
377#ifdef CONFIG_ALTIVEC
378BEGIN_FTR_SECTION
379 oris r0,r0,MSR_VEC@h /* Disable altivec */
380 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
381 std r24,THREAD_VRSAVE(r3)
382END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
383#endif /* CONFIG_ALTIVEC */
384 and. r0,r0,r22
385 beq+ 1f
386 andc r22,r22,r0
387 mtmsrd r22
388 isync
3891: std r20,_NIP(r1)
390 mfcr r23
391 std r23,_CCR(r1)
392 std r1,KSP(r3) /* Set old stack pointer */
393
394#ifdef CONFIG_SMP
395 /* We need a sync somewhere here to make sure that if the
396 * previous task gets rescheduled on another CPU, it sees all
397 * stores it has performed on this one.
398 */
399 sync
400#endif /* CONFIG_SMP */
401
402 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
403 std r6,PACACURRENT(r13) /* Set new 'current' */
404
405 ld r8,KSP(r4) /* new stack pointer */
406BEGIN_FTR_SECTION
407 clrrdi r6,r8,28 /* get its ESID */
408 clrrdi r9,r1,28 /* get current sp ESID */
409 clrldi. r0,r6,2 /* is new ESID c00000000? */
410 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
411 cror eq,4*cr1+eq,eq
412 beq 2f /* if yes, don't slbie it */
413
414 /* Bolt in the new stack SLB entry */
415 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
416 oris r0,r6,(SLB_ESID_V)@h
417 ori r0,r0,(SLB_NUM_BOLTED-1)@l
418 slbie r6
419 slbie r6 /* Workaround POWER5 < DD2.1 issue */
420 slbmte r7,r0
421 isync
422
4232:
424END_FTR_SECTION_IFSET(CPU_FTR_SLB)
425 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
426 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
427 because we don't need to leave the 288-byte ABI gap at the
428 top of the kernel stack. */
429 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
430
431 mr r1,r8 /* start using new stack pointer */
432 std r7,PACAKSAVE(r13)
433
434 ld r6,_CCR(r1)
435 mtcrf 0xFF,r6
436
437#ifdef CONFIG_ALTIVEC
438BEGIN_FTR_SECTION
439 ld r0,THREAD_VRSAVE(r4)
440 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
441END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
442#endif /* CONFIG_ALTIVEC */
443
444 /* r3-r13 are destroyed -- Cort */
445 REST_8GPRS(14, r1)
446 REST_10GPRS(22, r1)
447
448 /* convert old thread to its task_struct for return value */
449 addi r3,r3,-THREAD
450 ld r7,_NIP(r1) /* Return to _switch caller in new task */
451 mtlr r7
452 addi r1,r1,SWITCH_FRAME_SIZE
453 blr
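
As the comment block before _switch notes, r3 and r4 carry the two thread_struct pointers in and the old task's task_struct comes back in r3. The implied C-level prototype, sketched with abbreviated types — an inference from the comment, not copied from a header:

    /* Hypothetical C view of _switch: the ppc64 ABI passes the first
     * two arguments in r3/r4 and returns in r3. Illustration only. */
    struct thread_struct;
    struct task_struct;

    extern struct task_struct *_switch(struct thread_struct *prev_thread,
                                       struct thread_struct *new_thread);

    /* Typical call shape: last = _switch(&prev->thread, &new->thread); */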
454
455 .align 7
456_GLOBAL(ret_from_except)
457 ld r11,_TRAP(r1)
458 andi. r0,r11,1
459 bne .ret_from_except_lite
460 REST_NVGPRS(r1)
461
462_GLOBAL(ret_from_except_lite)
463 /*
464 * Disable interrupts so that current_thread_info()->flags
465 * can't change between when we test it and when we return
466 * from the interrupt.
467 */
468 mfmsr r10 /* Get current interrupt state */
469 rldicl r9,r10,48,1 /* clear MSR_EE */
470 rotldi r9,r9,16
471 mtmsrd r9,1 /* Update machine state */
472
473#ifdef CONFIG_PREEMPT
474 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
475 li r0,_TIF_NEED_RESCHED /* bits to check */
476 ld r3,_MSR(r1)
477 ld r4,TI_FLAGS(r9)
478 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
479 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
480 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
481 bne do_work
482
483#else /* !CONFIG_PREEMPT */
484 ld r3,_MSR(r1) /* Returning to user mode? */
485 andi. r3,r3,MSR_PR
486 beq restore /* if not, just restore regs and return */
487
488 /* Check current_thread_info()->flags */
489 clrrdi r9,r1,THREAD_SHIFT
490 ld r4,TI_FLAGS(r9)
491 andi. r0,r4,_TIF_USER_WORK_MASK
492 bne do_work
493#endif
494
495restore:
496#ifdef CONFIG_PPC_ISERIES
497 ld r5,SOFTE(r1)
498 cmpdi 0,r5,0
499 beq 4f
500 /* Check for pending interrupts (iSeries) */
501 ld r3,PACALPPACA+LPPACAANYINT(r13)
502 cmpdi r3,0
503 beq+ 4f /* skip do_IRQ if no interrupts */
504
505 li r3,0
506 stb r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
507 ori r10,r10,MSR_EE
508 mtmsrd r10 /* hard-enable again */
509 addi r3,r1,STACK_FRAME_OVERHEAD
510 bl .do_IRQ
511 b .ret_from_except_lite /* loop back and handle more */
512
5134: stb r5,PACAPROCENABLED(r13)
514#endif
515
516 ld r3,_MSR(r1)
517 andi. r0,r3,MSR_RI
518 beq- unrecov_restore
519
520 andi. r0,r3,MSR_PR
521
522 /*
523 * r13 is our per cpu area, only restore it if we are returning to
524 * userspace
525 */
526 beq 1f
527 REST_GPR(13, r1)
5281:
529 ld r3,_CTR(r1)
530 ld r0,_LINK(r1)
531 mtctr r3
532 mtlr r0
533 ld r3,_XER(r1)
534 mtspr XER,r3
535
536 REST_8GPRS(5, r1)
537
538 stdcx. r0,0,r1 /* to clear the reservation */
539
540 mfmsr r0
541 li r2, MSR_RI
542 andc r0,r0,r2
543 mtmsrd r0,1
544
545 ld r0,_MSR(r1)
546 mtspr SRR1,r0
547
548 ld r2,_CCR(r1)
549 mtcrf 0xFF,r2
550 ld r2,_NIP(r1)
551 mtspr SRR0,r2
552
553 ld r0,GPR0(r1)
554 ld r2,GPR2(r1)
555 ld r3,GPR3(r1)
556 ld r4,GPR4(r1)
557 ld r1,GPR1(r1)
558
559 rfid
560 b . /* prevent speculative execution */
561
562/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
563do_work:
564#ifdef CONFIG_PREEMPT
565 andi. r0,r3,MSR_PR /* Returning to user mode? */
566 bne user_work
567 /* Check that preempt_count() == 0 and interrupts are enabled */
568 lwz r8,TI_PREEMPT(r9)
569 cmpwi cr1,r8,0
570#ifdef CONFIG_PPC_ISERIES
571 ld r0,SOFTE(r1)
572 cmpdi r0,0
573#else
574 andi. r0,r3,MSR_EE
575#endif
576 crandc eq,cr1*4+eq,eq
577 bne restore
578 /* here we are preempting the current task */
5791:
580#ifdef CONFIG_PPC_ISERIES
581 li r0,1
582 stb r0,PACAPROCENABLED(r13)
583#endif
584 ori r10,r10,MSR_EE
585 mtmsrd r10,1 /* reenable interrupts */
586 bl .preempt_schedule
587 mfmsr r10
588 clrrdi r9,r1,THREAD_SHIFT
589 rldicl r10,r10,48,1 /* disable interrupts again */
590 rotldi r10,r10,16
591 mtmsrd r10,1
592 ld r4,TI_FLAGS(r9)
593 andi. r0,r4,_TIF_NEED_RESCHED
594 bne 1b
595 b restore
596
597user_work:
598#endif
599 /* Enable interrupts */
600 ori r10,r10,MSR_EE
601 mtmsrd r10,1
602
603 andi. r0,r4,_TIF_NEED_RESCHED
604 beq 1f
605 bl .schedule
606 b .ret_from_except_lite
607
6081: bl .save_nvgprs
609 li r3,0
610 addi r4,r1,STACK_FRAME_OVERHEAD
611 bl .do_signal
612 b .ret_from_except
613
614unrecov_restore:
615 addi r3,r1,STACK_FRAME_OVERHEAD
616 bl .unrecoverable_exception
617 b unrecov_restore
618
619#ifdef CONFIG_PPC_RTAS
620/*
621 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
622 * called with the MMU off.
623 *
624 * In addition, we need to be in 32b mode, at least for now.
625 *
626 * Note: r3 is an input parameter to rtas, so don't trash it...
627 */
628_GLOBAL(enter_rtas)
629 mflr r0
630 std r0,16(r1)
631 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
632
633 /* Because RTAS is running in 32b mode, it clobbers the high order half
634 * of all registers that it saves. We therefore save those registers
635 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
636 */
637 SAVE_GPR(2, r1) /* Save the TOC */
638 SAVE_GPR(13, r1) /* Save paca */
639 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
640 SAVE_10GPRS(22, r1) /* ditto */
641
642 mfcr r4
643 std r4,_CCR(r1)
644 mfctr r5
645 std r5,_CTR(r1)
646 mfspr r6,XER
647 std r6,_XER(r1)
648 mfdar r7
649 std r7,_DAR(r1)
650 mfdsisr r8
651 std r8,_DSISR(r1)
652 mfsrr0 r9
653 std r9,_SRR0(r1)
654 mfsrr1 r10
655 std r10,_SRR1(r1)
656
657 /* There is no way it is acceptable to get here with interrupts enabled,
658 * check it with the asm equivalent of WARN_ON
659 */
660 mfmsr r6
661 andi. r0,r6,MSR_EE
6621: tdnei r0,0
663.section __bug_table,"a"
664 .llong 1b,__LINE__ + 0x1000000, 1f, 2f
665.previous
666.section .rodata,"a"
6671: .asciz __FILE__
6682: .asciz "enter_rtas"
669.previous
670
671 /* Unfortunately, the stack pointer and the MSR are also clobbered,
672 * so they are saved in the PACA which allows us to restore
673 * our original state after RTAS returns.
674 */
675 std r1,PACAR1(r13)
676 std r6,PACASAVEDMSR(r13)
677
678 /* Setup our real return addr */
679 SET_REG_TO_LABEL(r4,.rtas_return_loc)
680 SET_REG_TO_CONST(r9,KERNELBASE)
681 sub r4,r4,r9
682 mtlr r4
683
684 li r0,0
685 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
686 andc r0,r6,r0
687
688 li r9,1
689 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
690 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
691 andc r6,r0,r9
692 ori r6,r6,MSR_RI
693 sync /* disable interrupts so SRR0/1 */
694 mtmsrd r0 /* don't get trashed */
695
696 SET_REG_TO_LABEL(r4,rtas)
697 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
698 ld r4,RTASBASE(r4) /* get the rtas->base value */
699
700 mtspr SRR0,r5
701 mtspr SRR1,r6
702 rfid
703 b . /* prevent speculative execution */
704
705_STATIC(rtas_return_loc)
706 /* relocation is off at this point */
707 mfspr r4,SPRG3 /* Get PACA */
708 SET_REG_TO_CONST(r5, KERNELBASE)
709 sub r4,r4,r5 /* RELOC the PACA base pointer */
710
711 mfmsr r6
712 li r0,MSR_RI
713 andc r6,r6,r0
714 sync
715 mtmsrd r6
716
717 ld r1,PACAR1(r4) /* Restore our SP */
718 LOADADDR(r3,.rtas_restore_regs)
719 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
720
721 mtspr SRR0,r3
722 mtspr SRR1,r4
723 rfid
724 b . /* prevent speculative execution */
725
726_STATIC(rtas_restore_regs)
727 /* relocation is on at this point */
728 REST_GPR(2, r1) /* Restore the TOC */
729 REST_GPR(13, r1) /* Restore paca */
730 REST_8GPRS(14, r1) /* Restore the non-volatiles */
731 REST_10GPRS(22, r1) /* ditto */
732
733 mfspr r13,SPRG3
734
735 ld r4,_CCR(r1)
736 mtcr r4
737 ld r5,_CTR(r1)
738 mtctr r5
739 ld r6,_XER(r1)
740 mtspr XER,r6
741 ld r7,_DAR(r1)
742 mtdar r7
743 ld r8,_DSISR(r1)
744 mtdsisr r8
745 ld r9,_SRR0(r1)
746 mtsrr0 r9
747 ld r10,_SRR1(r1)
748 mtsrr1 r10
749
750 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
751 ld r0,16(r1) /* get return address */
752
753 mtlr r0
754 blr /* return to caller */
755
756#endif /* CONFIG_PPC_RTAS */
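
The save/restore choreography in enter_rtas above exists because a 32-bit callee preserves at most the low word of each 64-bit register. A tiny stand-alone C illustration of the hazard (values are arbitrary):

    #include <stdio.h>

    int main(void)
    {
            unsigned long before = 0x123456789abcdef0UL;
            /* A 32-bit callee is only obliged to keep the low word, so
             * the caller must assume the high word comes back as junk. */
            unsigned long after = before & 0xffffffffUL;
            printf("%#lx -> %#lx\n", before, after);
            return 0;
    }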
757
758#ifdef CONFIG_PPC_MULTIPLATFORM
759
760_GLOBAL(enter_prom)
761 mflr r0
762 std r0,16(r1)
763 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
764
765 /* Because PROM is running in 32b mode, it clobbers the high order half
766 * of all registers that it saves. We therefore save those registers
767 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
768 */
769 SAVE_8GPRS(2, r1)
770 SAVE_GPR(13, r1)
771 SAVE_8GPRS(14, r1)
772 SAVE_10GPRS(22, r1)
773 mfcr r4
774 std r4,_CCR(r1)
775 mfctr r5
776 std r5,_CTR(r1)
777 mfspr r6,XER
778 std r6,_XER(r1)
779 mfdar r7
780 std r7,_DAR(r1)
781 mfdsisr r8
782 std r8,_DSISR(r1)
783 mfsrr0 r9
784 std r9,_SRR0(r1)
785 mfsrr1 r10
786 std r10,_SRR1(r1)
787 mfmsr r11
788 std r11,_MSR(r1)
789
790 /* Get the PROM entrypoint */
791 ld r0,GPR4(r1)
792 mtlr r0
793
794 /* Switch MSR to 32 bits mode
795 */
796 mfmsr r11
797 li r12,1
798 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
799 andc r11,r11,r12
800 li r12,1
801 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
802 andc r11,r11,r12
803 mtmsrd r11
804 isync
805
806 /* Restore arguments & enter PROM here... */
807 ld r3,GPR3(r1)
808 blrl
809
810 /* Just make sure that r1 top 32 bits didn't get
811 * corrupt by OF
812 */
813 rldicl r1,r1,0,32
814
815 /* Restore the MSR (back to 64 bits) */
816 ld r0,_MSR(r1)
817 mtmsrd r0
818 isync
819
820 /* Restore other registers */
821 REST_GPR(2, r1)
822 REST_GPR(13, r1)
823 REST_8GPRS(14, r1)
824 REST_10GPRS(22, r1)
825 ld r4,_CCR(r1)
826 mtcr r4
827 ld r5,_CTR(r1)
828 mtctr r5
829 ld r6,_XER(r1)
830 mtspr XER,r6
831 ld r7,_DAR(r1)
832 mtdar r7
833 ld r8,_DSISR(r1)
834 mtdsisr r8
835 ld r9,_SRR0(r1)
836 mtsrr0 r9
837 ld r10,_SRR1(r1)
838 mtsrr1 r10
839
840 addi r1,r1,PROM_FRAME_SIZE
841 ld r0,16(r1)
842 mtlr r0
843 blr
844
845#endif /* CONFIG_PPC_MULTIPLATFORM */
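
One last note on entry.S before moving on: the exception_marker constant 0x7265677368657265 stored at "-16(r9)" is simply the ASCII string "regshere", a sentinel that lets stack unwinders recognize where a saved-register block begins. A stand-alone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long marker = 0x7265677368657265ULL;
            char buf[9];
            int i;

            /* Read bytes most-significant first, matching the big-endian
             * layout ppc64 uses on the stack. */
            for (i = 0; i < 8; i++)
                    buf[i] = (char)((marker >> (56 - 8 * i)) & 0xff);
            buf[8] = '\0';
            printf("%s\n", buf);   /* prints "regshere" */
            return 0;
    }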
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 72c61041151a..929f9f42cf7a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -36,6 +36,7 @@
 #include <asm/setup.h>
 #include <asm/hvcall.h>
 #include <asm/iSeries/LparMap.h>
+#include <asm/thread_info.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE
@@ -80,7 +81,7 @@ _stext:
 _GLOBAL(__start)
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 	b .__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
@@ -201,22 +202,22 @@ exception_marker:
 #define EX_CCR 60
 
 #define EXCEPTION_PROLOG_PSERIES(area, label) \
-	mfspr r13,SPRG3; /* get paca address into r13 */ \
+	mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
 	std r9,area+EX_R9(r13); /* save r9 - r12 */ \
 	std r10,area+EX_R10(r13); \
 	std r11,area+EX_R11(r13); \
 	std r12,area+EX_R12(r13); \
-	mfspr r9,SPRG1; \
+	mfspr r9,SPRN_SPRG1; \
 	std r9,area+EX_R13(r13); \
 	mfcr r9; \
 	clrrdi r12,r13,32; /* get high part of &label */ \
 	mfmsr r10; \
-	mfspr r11,SRR0; /* save SRR0 */ \
+	mfspr r11,SPRN_SRR0; /* save SRR0 */ \
 	ori r12,r12,(label)@l; /* virt addr of handler */ \
 	ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
-	mtspr SRR0,r12; \
-	mfspr r12,SRR1; /* and SRR1 */ \
-	mtspr SRR1,r10; \
+	mtspr SPRN_SRR0,r12; \
+	mfspr r12,SPRN_SRR1; /* and SRR1 */ \
+	mtspr SPRN_SRR1,r10; \
 	rfid; \
 	b . /* prevent speculative execution */
 
@@ -225,12 +226,12 @@ exception_marker:
  * This code runs with relocation on.
  */
 #define EXCEPTION_PROLOG_ISERIES_1(area) \
-	mfspr r13,SPRG3; /* get paca address into r13 */ \
+	mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
 	std r9,area+EX_R9(r13); /* save r9 - r12 */ \
 	std r10,area+EX_R10(r13); \
 	std r11,area+EX_R11(r13); \
 	std r12,area+EX_R12(r13); \
-	mfspr r9,SPRG1; \
+	mfspr r9,SPRN_SPRG1; \
 	std r9,area+EX_R13(r13); \
 	mfcr r9
 
@@ -283,7 +284,7 @@ exception_marker:
 	std r9,_LINK(r1); \
 	mfctr r10; /* save CTR in stackframe */ \
 	std r10,_CTR(r1); \
-	mfspr r11,XER; /* save XER in stackframe */ \
+	mfspr r11,SPRN_XER; /* save XER in stackframe */ \
 	std r11,_XER(r1); \
 	li r9,(n)+1; \
 	std r9,_TRAP(r1); /* set trap number */ \
@@ -300,7 +301,7 @@ exception_marker:
 	.globl label##_pSeries; \
 label##_pSeries: \
 	HMT_MEDIUM; \
-	mtspr SPRG1,r13; /* save r13 */ \
+	mtspr SPRN_SPRG1,r13; /* save r13 */ \
 	RUNLATCH_ON(r13); \
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
 
@@ -308,7 +309,7 @@ label##_pSeries: \
 	.globl label##_iSeries; \
 label##_iSeries: \
 	HMT_MEDIUM; \
-	mtspr SPRG1,r13; /* save r13 */ \
+	mtspr SPRN_SPRG1,r13; /* save r13 */ \
 	RUNLATCH_ON(r13); \
 	EXCEPTION_PROLOG_ISERIES_1(area); \
 	EXCEPTION_PROLOG_ISERIES_2; \
@@ -318,7 +319,7 @@ label##_iSeries: \
 	.globl label##_iSeries; \
 label##_iSeries: \
 	HMT_MEDIUM; \
-	mtspr SPRG1,r13; /* save r13 */ \
+	mtspr SPRN_SPRG1,r13; /* save r13 */ \
 	RUNLATCH_ON(r13); \
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
 	lbz r10,PACAPROCENABLED(r13); \
@@ -388,7 +389,7 @@ __start_interrupts:
 	. = 0x200
 _machine_check_pSeries:
 	HMT_MEDIUM
-	mtspr SPRG1,r13 /* save r13 */
+	mtspr SPRN_SPRG1,r13 /* save r13 */
 	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
@@ -396,18 +397,18 @@ _machine_check_pSeries:
 	.globl data_access_pSeries
 data_access_pSeries:
 	HMT_MEDIUM
-	mtspr SPRG1,r13
+	mtspr SPRN_SPRG1,r13
 BEGIN_FTR_SECTION
-	mtspr SPRG2,r12
-	mfspr r13,DAR
-	mfspr r12,DSISR
+	mtspr SPRN_SPRG2,r12
+	mfspr r13,SPRN_DAR
+	mfspr r12,SPRN_DSISR
 	srdi r13,r13,60
 	rlwimi r13,r12,16,0x20
 	mfcr r12
 	cmpwi r13,0x2c
 	beq .do_stab_bolted_pSeries
 	mtcrf 0x80,r12
-	mfspr r12,SPRG2
+	mfspr r12,SPRN_SPRG2
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
 
@@ -415,19 +416,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
 	HMT_MEDIUM
-	mtspr SPRG1,r13
+	mtspr SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
-	mfspr r13,SPRG3 /* get paca address into r13 */
+	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
 	std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
 	std r10,PACA_EXSLB+EX_R10(r13)
 	std r11,PACA_EXSLB+EX_R11(r13)
 	std r12,PACA_EXSLB+EX_R12(r13)
 	std r3,PACA_EXSLB+EX_R3(r13)
-	mfspr r9,SPRG1
+	mfspr r9,SPRN_SPRG1
 	std r9,PACA_EXSLB+EX_R13(r13)
 	mfcr r9
-	mfspr r12,SRR1 /* and SRR1 */
-	mfspr r3,DAR
+	mfspr r12,SPRN_SRR1 /* and SRR1 */
+	mfspr r3,SPRN_DAR
 	b .do_slb_miss /* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -436,19 +437,19 @@ data_access_slb_pSeries:
 	.globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
 	HMT_MEDIUM
-	mtspr SPRG1,r13
+	mtspr SPRN_SPRG1,r13
 	RUNLATCH_ON(r13)
-	mfspr r13,SPRG3 /* get paca address into r13 */
+	mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
 	std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
 	std r10,PACA_EXSLB+EX_R10(r13)
 	std r11,PACA_EXSLB+EX_R11(r13)
 	std r12,PACA_EXSLB+EX_R12(r13)
 	std r3,PACA_EXSLB+EX_R3(r13)
-	mfspr r9,SPRG1
+	mfspr r9,SPRN_SPRG1
 	std r9,PACA_EXSLB+EX_R13(r13)
 	mfcr r9
-	mfspr r12,SRR1 /* and SRR1 */
-	mfspr r3,SRR0 /* SRR0 is faulting address */
+	mfspr r12,SPRN_SRR1 /* and SRR1 */
+	mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
 	b .do_slb_miss /* Rel. branch works in real mode */
 
 	STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
@@ -466,15 +467,15 @@ system_call_pSeries:
 	RUNLATCH_ON(r9)
 	mr r9,r13
 	mfmsr r10
-	mfspr r13,SPRG3
-	mfspr r11,SRR0
+	mfspr r13,SPRN_SPRG3
+	mfspr r11,SPRN_SRR0
 	clrrdi r12,r13,32
 	oris r12,r12,system_call_common@h
 	ori r12,r12,system_call_common@l
-	mtspr SRR0,r12
+	mtspr SPRN_SRR0,r12
 	ori r10,r10,MSR_IR|MSR_DR|MSR_RI
-	mfspr r12,SRR1
-	mtspr SRR1,r10
+	mfspr r12,SPRN_SRR1
+	mtspr SPRN_SRR1,r10
 	rfid
 	b . /* prevent speculative execution */
 
@@ -504,25 +505,25 @@ system_call_pSeries:
 	.align 7
 _GLOBAL(do_stab_bolted_pSeries)
 	mtcrf 0x80,r12
-	mfspr r12,SPRG2
+	mfspr r12,SPRN_SPRG2
 	EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
 
 /*
  * Vectors for the FWNMI option. Share common code.
  */
 	.globl system_reset_fwnmi
 system_reset_fwnmi:
 	HMT_MEDIUM
-	mtspr SPRG1,r13 /* save r13 */
+	mtspr SPRN_SPRG1,r13 /* save r13 */
 	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
 
 	.globl machine_check_fwnmi
machine_check_fwnmi:
 	HMT_MEDIUM
-	mtspr SPRG1,r13 /* save r13 */
+	mtspr SPRN_SPRG1,r13 /* save r13 */
 	RUNLATCH_ON(r13)
 	EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
 
 #ifdef CONFIG_PPC_ISERIES
 /*** ISeries-LPAR interrupt handlers ***/
@@ -531,18 +532,18 @@ machine_check_fwnmi:
 
 	.globl data_access_iSeries
data_access_iSeries:
-	mtspr SPRG1,r13
+	mtspr SPRN_SPRG1,r13
 BEGIN_FTR_SECTION
-	mtspr SPRG2,r12
-	mfspr r13,DAR
-	mfspr r12,DSISR
+	mtspr SPRN_SPRG2,r12
+	mfspr r13,SPRN_DAR
+	mfspr r12,SPRN_DSISR
 	srdi r13,r13,60
 	rlwimi r13,r12,16,0x20
 	mfcr r12
 	cmpwi r13,0x2c
 	beq .do_stab_bolted_iSeries
 	mtcrf 0x80,r12
-	mfspr r12,SPRG2
+	mfspr r12,SPRN_SPRG2
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
 	EXCEPTION_PROLOG_ISERIES_2
@@ -550,25 +551,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
 .do_stab_bolted_iSeries:
 	mtcrf 0x80,r12
-	mfspr r12,SPRG2
+	mfspr r12,SPRN_SPRG2
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
 	EXCEPTION_PROLOG_ISERIES_2
 	b .do_stab_bolted
 
 	.globl data_access_slb_iSeries
data_access_slb_iSeries:
-	mtspr SPRG1,r13 /* save r13 */
+	mtspr SPRN_SPRG1,r13 /* save r13 */
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
 	std r3,PACA_EXSLB+EX_R3(r13)
 	ld r12,PACALPPACA+LPPACASRR1(r13)
-	mfspr r3,DAR
+	mfspr r3,SPRN_DAR
 	b .do_slb_miss
 
 	STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
 
 	.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
-	mtspr SPRG1,r13 /* save r13 */
+	mtspr SPRN_SPRG1,r13 /* save r13 */
 	EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
 	std r3,PACA_EXSLB+EX_R3(r13)
 	ld r12,PACALPPACA+LPPACASRR1(r13)
@@ -586,7 +587,7 @@ instruction_access_slb_iSeries:
 	.globl system_call_iSeries
system_call_iSeries:
 	mr r9,r13
-	mfspr r13,SPRG3
+	mfspr r13,SPRN_SPRG3
 	EXCEPTION_PROLOG_ISERIES_2
 	b system_call_common
 
@@ -596,7 +597,7 @@ system_call_iSeries:
 
 	.globl system_reset_iSeries
system_reset_iSeries:
-	mfspr r13,SPRG3 /* Get paca address */
+	mfspr r13,SPRN_SPRG3 /* Get paca address */
 	mfmsr r24
 	ori r24,r24,MSR_RI
 	mtmsrd r24 /* RI on */
@@ -639,7 +640,7 @@ iSeries_secondary_smp_loop:
 #endif /* CONFIG_SMP */
 	li r0,-1 /* r0=-1 indicates a Hypervisor call */
 	sc /* Invoke the hypervisor via a system call */
-	mfspr r13,SPRG3 /* Put r13 back ???? */
+	mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
 	b 1b /* If SMP not configured, secondaries
 	      * loop forever */
 
@@ -656,8 +657,8 @@ hardware_interrupt_iSeries_masked:
 	mtcrf 0x80,r9 /* Restore regs */
 	ld r11,PACALPPACA+LPPACASRR0(r13)
 	ld r12,PACALPPACA+LPPACASRR1(r13)
-	mtspr SRR0,r11
-	mtspr SRR1,r12
+	mtspr SPRN_SRR0,r11
+	mtspr SPRN_SRR1,r12
 	ld r9,PACA_EXGEN+EX_R9(r13)
 	ld r10,PACA_EXGEN+EX_R10(r13)
 	ld r11,PACA_EXGEN+EX_R11(r13)
@@ -713,8 +714,8 @@ bad_stack:
 	std r10,GPR1(r1)
 	std r11,_NIP(r1)
 	std r12,_MSR(r1)
-	mfspr r11,DAR
-	mfspr r12,DSISR
+	mfspr r11,SPRN_DAR
+	mfspr r12,SPRN_DSISR
 	std r11,_DAR(r1)
 	std r12,_DSISR(r1)
 	mflr r10
@@ -746,6 +747,7 @@ bad_stack:
  * any task or sent any task a signal, you should use
  * ret_from_except or ret_from_except_lite instead of this.
  */
+	.globl fast_exception_return
fast_exception_return:
 	ld r12,_MSR(r1)
 	ld r11,_NIP(r1)
@@ -766,8 +768,8 @@ fast_exception_return:
 	clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
 	mtmsrd r10,1
 
-	mtspr SRR1,r12
-	mtspr SRR0,r11
+	mtspr SPRN_SRR1,r12
+	mtspr SPRN_SRR0,r11
 	REST_4GPRS(10, r1)
 	ld r1,GPR1(r1)
 	rfid
@@ -788,9 +790,9 @@ unrecov_fer:
 	.globl data_access_common
data_access_common:
 	RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
-	mfspr r10,DAR
+	mfspr r10,SPRN_DAR
 	std r10,PACA_EXGEN+EX_DAR(r13)
-	mfspr r10,DSISR
+	mfspr r10,SPRN_DSISR
 	stw r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
 	ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -821,9 +823,9 @@ hardware_interrupt_entry:
 	.align 7
 	.globl alignment_common
alignment_common:
-	mfspr r10,DAR
+	mfspr r10,SPRN_DAR
 	std r10,PACA_EXGEN+EX_DAR(r13)
-	mfspr r10,DSISR
+	mfspr r10,SPRN_DSISR
 	stw r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
 	ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -857,62 +859,6 @@ fp_unavailable_common:
 	bl .kernel_fp_unavailable_exception
 	BUG_OPCODE
 
-/*
- * load_up_fpu(unused, unused, tsk)
- * Disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- * On SMP we know the fpu is free, since we give it up every
- * switch (ie, no lazy save of the FP registers).
- * On entry: r13 == 'current' && last_task_used_math != 'current'
- */
-_STATIC(load_up_fpu)
-	mfmsr r5 /* grab the current MSR */
-	ori r5,r5,MSR_FP
-	mtmsrd r5 /* enable use of fpu now */
-	isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- *
- */
-#ifndef CONFIG_SMP
-	ld r3,last_task_used_math@got(r2)
-	ld r4,0(r3)
-	cmpdi 0,r4,0
-	beq 1f
-	/* Save FP state to last_task_used_math's THREAD struct */
-	addi r4,r4,THREAD
-	SAVE_32FPRS(0, r4)
-	mffs fr0
-	stfd fr0,THREAD_FPSCR(r4)
-	/* Disable FP for last_task_used_math */
-	ld r5,PT_REGS(r4)
-	ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-	li r6,MSR_FP|MSR_FE0|MSR_FE1
-	andc r4,r4,r6
-	std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-	/* enable use of FP after return */
-	ld r4,PACACURRENT(r13)
-	addi r5,r4,THREAD /* Get THREAD */
-	ld r4,THREAD_FPEXC_MODE(r5)
-	ori r12,r12,MSR_FP
-	or r12,r12,r4
-	std r12,_MSR(r1)
-	lfd fr0,THREAD_FPSCR(r5)
-	mtfsf 0xff,fr0
-	REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
-	/* Update last_task_used_math to 'current' */
-	subi r4,r5,THREAD /* Back to 'current' */
-	std r4,0(r3)
-#endif /* CONFIG_SMP */
-	/* restore registers and return */
-	b fast_exception_return
-
 	.align 7
 	.globl altivec_unavailable_common
altivec_unavailable_common:
@@ -1120,7 +1066,7 @@ _GLOBAL(do_stab_bolted)
 
 	/* Hash to the primary group */
 	ld r10,PACASTABVIRT(r13)
-	mfspr r11,DAR
+	mfspr r11,SPRN_DAR
 	srdi r11,r11,28
 	rldimi r10,r11,7,52 /* r10 = first ste of the group */
 
@@ -1162,7 +1108,7 @@ _GLOBAL(do_stab_bolted)
 2:	std r9,8(r10) /* Store the vsid part of the ste */
 	eieio
 
-	mfspr r11,DAR /* Get the new esid */
+	mfspr r11,SPRN_DAR /* Get the new esid */
 	clrrdi r11,r11,28 /* Permits a full 32b of ESID */
 	ori r11,r11,0x90 /* Turn on valid and kp */
 	std r11,0(r10) /* Put new entry back into the stab */
@@ -1182,8 +1128,8 @@ _GLOBAL(do_stab_bolted)
 	clrrdi r10,r10,2
 	mtmsrd r10,1
 
-	mtspr SRR0,r11
-	mtspr SRR1,r12
+	mtspr SPRN_SRR0,r11
+	mtspr SPRN_SRR1,r12
 	ld r9,PACA_EXSLB+EX_R9(r13)
 	ld r10,PACA_EXSLB+EX_R10(r13)
 	ld r11,PACA_EXSLB+EX_R11(r13)
@@ -1229,8 +1175,8 @@ _GLOBAL(do_slb_miss)
 .machine pop
 
 #ifdef CONFIG_PPC_ISERIES
-	mtspr SRR0,r11
-	mtspr SRR1,r12
+	mtspr SPRN_SRR0,r11
+	mtspr SPRN_SRR1,r12
 #endif /* CONFIG_PPC_ISERIES */
 	ld r9,PACA_EXSLB+EX_R9(r13)
 	ld r10,PACA_EXSLB+EX_R10(r13)
@@ -1253,7 +1199,7 @@ unrecov_slb:
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on). The address is give to the hv
-* as a page number (see xLparMap in LparData.c), so this must be at a
+* as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
@@ -1316,7 +1262,7 @@ _GLOBAL(pSeries_secondary_smp_init)
 	mr r3,r24 /* not found, copy phys to r3 */
 	b .kexec_wait /* next kernel might do better */
 
-2:	mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
+2:	mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
 	/* From now on, r24 is expected to be logical cpuid */
 	mr r24,r5
 3:	HMT_LOW
@@ -1364,6 +1310,7 @@ _STATIC(__start_initialization_iSeries)
 	addi r2,r2,0x4000
 
 	bl .iSeries_early_setup
+	bl .early_setup
 
 	/* relocation is on at this point */
 
@@ -1554,20 +1501,17 @@ copy_to_here:
 	.section ".text";
 	.align 2 ;
 
-	.globl pmac_secondary_start_1
-pmac_secondary_start_1:
-	li r24, 1
-	b .pmac_secondary_start
-
-	.globl pmac_secondary_start_2
-pmac_secondary_start_2:
-	li r24, 2
-	b .pmac_secondary_start
-
-	.globl pmac_secondary_start_3
-pmac_secondary_start_3:
-	li r24, 3
-	b .pmac_secondary_start
+	.globl __secondary_start_pmac_0
+__secondary_start_pmac_0:
+	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
+	li r24,0
+	b 1f
+	li r24,1
+	b 1f
+	li r24,2
+	b 1f
+	li r24,3
+1:
 
 _GLOBAL(pmac_secondary_start)
 	/* turn on 64-bit mode */
@@ -1586,7 +1530,7 @@ _GLOBAL(pmac_secondary_start)
 	LOADADDR(r4, paca) /* Get base vaddr of paca array */
 	mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
 	add r13,r13,r4 /* for this processor. */
-	mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
+	mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
 
 	/* Create a temp kernel stack for use before relocation is on. */
 	ld r1,PACAEMERGSP(r13)
@@ -1621,7 +1565,7 @@ _GLOBAL(__secondary_start)
 	/* Initialize the page table pointer register. */
 	LOADADDR(r6,_SDR1)
 	ld r6,0(r6) /* get the value of _SDR1 */
-	mtspr SDR1,r6 /* set the htab location */
+	mtspr SPRN_SDR1,r6 /* set the htab location */
 #endif
 	/* Initialize the first segment table (or SLB) entry */
 	ld r3,PACASTABVIRT(r13) /* get addr of segment table */
@@ -1650,7 +1594,7 @@ _GLOBAL(__secondary_start)
 	lwz r3,PLATFORM(r3) /* r3 = platform flags */
 	andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
 	beq 98f /* branch if result is 0 */
-	mfspr r3,PVR
+	mfspr r3,SPRN_PVR
 	srwi r3,r3,16
 	cmpwi r3,0x37 /* SStar */
 	beq 97f
@@ -1674,8 +1618,8 @@ _GLOBAL(__secondary_start)
 #ifdef DO_SOFT_DISABLE
 	ori r4,r4,MSR_EE
 #endif
-	mtspr SRR0,r3
-	mtspr SRR1,r4
+	mtspr SPRN_SRR0,r3
+	mtspr SPRN_SRR1,r4
 	rfid
 	b . /* prevent speculative execution */
 
@@ -1737,7 +1681,7 @@ _STATIC(start_here_multiplatform)
 
 #ifdef CONFIG_HMT
 	/* Start up the second thread on cpu 0 */
-	mfspr r3,PVR
+	mfspr r3,SPRN_PVR
 	srwi r3,r3,16
 	cmpwi r3,0x34 /* Pulsar */
 	beq 90f
@@ -1797,7 +1741,7 @@ _STATIC(start_here_multiplatform)
 	mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
 	add r13,r13,r24 /* for this processor. */
 	sub r13,r13,r26 /* convert to physical addr */
-	mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */
+	mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
 
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
@@ -1814,7 +1758,7 @@ _STATIC(start_here_multiplatform)
 	lwz r3,PLATFORM(r3) /* r3 = platform flags */
 	andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
 	beq 98f /* branch if result is 0 */
-	mfspr r3,PVR
+	mfspr r3,SPRN_PVR
 	srwi r3,r3,16
 	cmpwi r3,0x37 /* SStar */
 	beq 97f
@@ -1838,12 +1782,12 @@ _STATIC(start_here_multiplatform)
 	LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
 	sub r6,r6,r26
 	ld r6,0(r6) /* get the value of _SDR1 */
-	mtspr SDR1,r6 /* set the htab location */
+	mtspr SPRN_SDR1,r6 /* set the htab location */
 98:
 	LOADADDR(r3,.start_here_common)
 	SET_REG_TO_CONST(r4, MSR_KERNEL)
-	mtspr SRR0,r3
-	mtspr SRR1,r4
+	mtspr SPRN_SRR0,r3
+	mtspr SPRN_SRR1,r4
 	rfid
 	b . /* prevent speculative execution */
 #endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -1874,7 +1818,7 @@ _STATIC(start_here_common)
 	LOADADDR(r24, paca) /* Get base vaddr of paca array */
 	mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
 	add r13,r13,r24 /* for this processor. */
-	mtspr SPRG3,r13
+	mtspr SPRN_SPRG3,r13
 
 	/* ptr to current */
 	LOADADDR(r4,init_task)
@@ -1901,7 +1845,7 @@ _STATIC(start_here_common)
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
 	LOADADDR(r5, hmt_thread_data)
-	mfspr r7,PVR
+	mfspr r7,SPRN_PVR
 	srwi r7,r7,16
 	cmpwi r7,0x34 /* Pulsar */
 	beq 90f
@@ -1910,10 +1854,10 @@ _GLOBAL(hmt_init)
 	cmpwi r7,0x37 /* SStar */
 	beq 91f
 	b 101f
-90:	mfspr r6,PIR
+90:	mfspr r6,SPRN_PIR
 	andi. r6,r6,0x1f
 	b 92f
-91:	mfspr r6,PIR
+91:	mfspr r6,SPRN_PIR
 	andi. r6,r6,0x3ff
 92:	sldi r4,r24,3
 	stwx r6,r5,r4
@@ -1924,8 +1868,8 @@ __hmt_secondary_hold:
 	LOADADDR(r5, hmt_thread_data)
 	clrldi r5,r5,4
 	li r7,0
-	mfspr r6,PIR
-	mfspr r8,PVR
+	mfspr r6,SPRN_PIR
+	mfspr r8,SPRN_PVR
 	srwi r8,r8,16
 	cmpwi r8,0x34
 	bne 93f
@@ -1951,39 +1895,41 @@ __hmt_secondary_hold:
 _GLOBAL(hmt_start_secondary)
 	LOADADDR(r4,__hmt_secondary_hold)
 	clrldi r4,r4,4
-	mtspr NIADORM, r4
-	mfspr r4, MSRDORM
+	mtspr SPRN_NIADORM, r4
+	mfspr r4, SPRN_MSRDORM
 	li r5, -65
 	and r4, r4, r5
-	mtspr MSRDORM, r4
+	mtspr SPRN_MSRDORM, r4
 	lis r4,0xffef
 	ori r4,r4,0x7403
-	mtspr TSC, r4
+	mtspr SPRN_TSC, r4
 	li r4,0x1f4
-	mtspr TST, r4
-	mfspr r4, HID0
+	mtspr SPRN_TST, r4
+	mfspr r4, SPRN_HID0
 	ori r4, r4, 0x1
-	mtspr HID0, r4
+	mtspr SPRN_HID0, r4
 	mfspr r4, SPRN_CTRLF
 	oris r4, r4, 0x40
 	mtspr SPRN_CTRLT, r4
 	blr
 #endif
 
-#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
+#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
 _GLOBAL(smp_release_cpus)
 	/* All secondary cpus are spinning on a common
 	 * spinloop, release them all now so they can start
 	 * to spin on their individual paca spinloops.
 	 * For non SMP kernels, the secondary cpus never
 	 * get out of the common spinloop.
+	 * XXX This does nothing useful on iSeries, secondaries are
+	 * already waiting on their paca.
 	 */
 	li r3,1
 	LOADADDR(r5,__secondary_hold_spinloop)
 	std r3,0(r5)
 	sync
 	blr
-#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */
+#endif /* CONFIG_SMP */
 
 
/*
@@ -1992,7 +1938,7 @@ _GLOBAL(smp_release_cpus)
 */
 	.section ".bss"
 
-	.align 12
+	.align PAGE_SHIFT
 
 	.globl empty_zero_page
empty_zero_page:
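
A note on the __secondary_start_pmac_0 hunk above: each "li r24,N; b 1f" pair is two 4-byte instructions, so every cpu's entry slot is exactly 8 bytes and the per-cpu start address can be computed instead of exported as three named symbols. A hedged C sketch of the arithmetic a caller would use (the caller itself is an assumption for illustration):

    /* __secondary_start_pmac_0 is the label from the diff above. */
    extern char __secondary_start_pmac_0[];

    static unsigned long pmac_secondary_entry(int cpu)
    {
            /* cpu 0 enters at offset 0, cpu 1 at 8, cpu 2 at 16, ... */
            return (unsigned long)__secondary_start_pmac_0 + 8UL * cpu;
    }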
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/ppc64/kernel/hvCall.S
deleted file mode 100644
index 4c699eab1b95..000000000000
--- a/arch/ppc64/kernel/hvCall.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * arch/ppc64/kernel/hvCall.S
3 *
4 *
5 * This file contains the code to perform calls to the
6 * iSeries LPAR hypervisor
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <asm/ppc_asm.h>
15#include <asm/processor.h>
16
17 .text
18
19/*
20 * Hypervisor call
21 *
22 * Invoke the iSeries hypervisor via the System Call instruction
23 * Parameters are passed to this routine in registers r3 - r10
24 *
25 * r3 contains the HV function to be called
26 * r4-r10 contain the operands to the hypervisor function
27 *
28 */
29
30_GLOBAL(HvCall)
31_GLOBAL(HvCall0)
32_GLOBAL(HvCall1)
33_GLOBAL(HvCall2)
34_GLOBAL(HvCall3)
35_GLOBAL(HvCall4)
36_GLOBAL(HvCall5)
37_GLOBAL(HvCall6)
38_GLOBAL(HvCall7)
39
40
41 mfcr r0
42 std r0,-8(r1)
43 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
44
45 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
46
47 li r0,-1
48
49 /* Invoke the hypervisor */
50
51 sc
52
53 ld r1,0(r1)
54 ld r0,-8(r1)
55 mtcrf 0xff,r0
56
57 /* return to caller, return value in r3 */
58
59 blr
60
61_GLOBAL(HvCall0Ret16)
62_GLOBAL(HvCall1Ret16)
63_GLOBAL(HvCall2Ret16)
64_GLOBAL(HvCall3Ret16)
65_GLOBAL(HvCall4Ret16)
66_GLOBAL(HvCall5Ret16)
67_GLOBAL(HvCall6Ret16)
68_GLOBAL(HvCall7Ret16)
69
70 mfcr r0
71 std r0,-8(r1)
72 std r31,-16(r1)
73 stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
74
75 mr r31,r4
76 li r0,-1
77 mr r4,r5
78 mr r5,r6
79 mr r6,r7
80 mr r7,r8
81 mr r8,r9
82 mr r9,r10
83
84 sc
85
86 std r3,0(r31)
87 std r4,8(r31)
88
89 mr r3,r5
90
91 ld r1,0(r1)
92 ld r0,-8(r1)
93 mtcrf 0xff,r0
94 ld r31,-16(r1)
95
96 blr
97
98
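
For reference, the convention the deleted stubs implemented: r3 carries the hypervisor function token, r4-r10 the operands, and r0 is loaded with -1 so the sc instruction is routed to the hypervisor rather than the kernel; the result returns in r3. The *Ret16 variants additionally take a buffer (originally in r4) that receives the 16 bytes the hypervisor returns in r3/r4. A hedged sketch of the C-side declarations this implies — prototypes inferred from the comment block above, not copied from a header:

    /* Inferred prototypes, illustration only: HvCallN takes the
     * function token plus N operands and returns the hypervisor's r3. */
    typedef unsigned long long u64;

    extern u64 HvCall0(u64 func);
    extern u64 HvCall1(u64 func, u64 op1);
    extern u64 HvCall2(u64 func, u64 op1, u64 op2);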
diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c
index bde8f42da854..4d584172055a 100644
--- a/arch/ppc64/kernel/hvcserver.c
+++ b/arch/ppc64/kernel/hvcserver.c
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
+#include <linux/slab.h>
+
 #include <asm/hvcall.h>
 #include <asm/hvcserver.h>
 #include <asm/io.h>
diff --git a/arch/ppc64/kernel/i8259.c b/arch/ppc64/kernel/i8259.c
deleted file mode 100644
index 74dcfd68fc75..000000000000
--- a/arch/ppc64/kernel/i8259.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * c 2001 PPC64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/signal.h>
13#include <linux/cache.h>
14#include <linux/irq.h>
15#include <linux/interrupt.h>
16#include <asm/io.h>
17#include <asm/ppcdebug.h>
18#include "i8259.h"
19
20unsigned char cached_8259[2] = { 0xff, 0xff };
21#define cached_A1 (cached_8259[0])
22#define cached_21 (cached_8259[1])
23
24static __cacheline_aligned_in_smp DEFINE_SPINLOCK(i8259_lock);
25
26static int i8259_pic_irq_offset;
27static int i8259_present;
28
29int i8259_irq(int cpu)
30{
31 int irq;
32
33 spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
34 /*
35 * Perform an interrupt acknowledge cycle on controller 1
36 */
37 outb(0x0C, 0x20);
38 irq = inb(0x20) & 7;
39 if (irq == 2)
40 {
41 /*
42 * Interrupt is cascaded so perform interrupt
43 * acknowledge on controller 2
44 */
45 outb(0x0C, 0xA0);
46 irq = (inb(0xA0) & 7) + 8;
47 }
48 else if (irq==7)
49 {
50 /*
51 * This may be a spurious interrupt
52 *
53 * Read the interrupt status register. If the most
54 * significant bit is not set then there is no valid
55 * interrupt
56 */
57 outb(0x0b, 0x20);
58 if(~inb(0x20)&0x80) {
59 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
60 return -1;
61 }
62 }
63 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
64 return irq;
65}
66
67static void i8259_mask_and_ack_irq(unsigned int irq_nr)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(&i8259_lock, flags);
72 if ( irq_nr >= i8259_pic_irq_offset )
73 irq_nr -= i8259_pic_irq_offset;
74
75 if (irq_nr > 7) {
76 cached_A1 |= 1 << (irq_nr-8);
77 inb(0xA1); /* DUMMY */
78 outb(cached_A1,0xA1);
79 outb(0x20,0xA0); /* Non-specific EOI */
80 outb(0x20,0x20); /* Non-specific EOI to cascade */
81 } else {
82 cached_21 |= 1 << irq_nr;
83 inb(0x21); /* DUMMY */
84 outb(cached_21,0x21);
85 outb(0x20,0x20); /* Non-specific EOI */
86 }
87 spin_unlock_irqrestore(&i8259_lock, flags);
88}
89
90static void i8259_set_irq_mask(int irq_nr)
91{
92 outb(cached_A1,0xA1);
93 outb(cached_21,0x21);
94}
95
96static void i8259_mask_irq(unsigned int irq_nr)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&i8259_lock, flags);
101 if ( irq_nr >= i8259_pic_irq_offset )
102 irq_nr -= i8259_pic_irq_offset;
103 if ( irq_nr < 8 )
104 cached_21 |= 1 << irq_nr;
105 else
106 cached_A1 |= 1 << (irq_nr-8);
107 i8259_set_irq_mask(irq_nr);
108 spin_unlock_irqrestore(&i8259_lock, flags);
109}
110
111static void i8259_unmask_irq(unsigned int irq_nr)
112{
113 unsigned long flags;
114
115 spin_lock_irqsave(&i8259_lock, flags);
116 if ( irq_nr >= i8259_pic_irq_offset )
117 irq_nr -= i8259_pic_irq_offset;
118 if ( irq_nr < 8 )
119 cached_21 &= ~(1 << irq_nr);
120 else
121 cached_A1 &= ~(1 << (irq_nr-8));
122 i8259_set_irq_mask(irq_nr);
123 spin_unlock_irqrestore(&i8259_lock, flags);
124}
125
126static void i8259_end_irq(unsigned int irq)
127{
128 if (!(get_irq_desc(irq)->status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
129 get_irq_desc(irq)->action)
130 i8259_unmask_irq(irq);
131}
132
133struct hw_interrupt_type i8259_pic = {
134 .typename = " i8259 ",
135 .enable = i8259_unmask_irq,
136 .disable = i8259_mask_irq,
137 .ack = i8259_mask_and_ack_irq,
138 .end = i8259_end_irq,
139};
140
141void __init i8259_init(int offset)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&i8259_lock, flags);
146 i8259_pic_irq_offset = offset;
147 i8259_present = 1;
148 /* init master interrupt controller */
149 outb(0x11, 0x20); /* Start init sequence */
150 outb(0x00, 0x21); /* Vector base */
151	outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
152 outb(0x01, 0x21); /* Select 8086 mode */
153 outb(0xFF, 0x21); /* Mask all */
154 /* init slave interrupt controller */
155 outb(0x11, 0xA0); /* Start init sequence */
156 outb(0x08, 0xA1); /* Vector base */
157 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
158 outb(0x01, 0xA1); /* Select 8086 mode */
159 outb(0xFF, 0xA1); /* Mask all */
160 outb(cached_A1, 0xA1);
161 outb(cached_21, 0x21);
162 spin_unlock_irqrestore(&i8259_lock, flags);
163
164}
165
166static int i8259_request_cascade(void)
167{
168 if (!i8259_present)
169 return -ENODEV;
170
171 request_irq( i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
172 "82c59 secondary cascade", NULL );
173
174 return 0;
175}
176
177arch_initcall(i8259_request_cascade);
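A worked, standalone example (plain C, not kernel code) of the cached-mask bit math used by i8259_mask_irq() and i8259_unmask_irq() above: IRQs 0-7 live in the master mask byte (written to port 0x21), IRQs 8-15 in the slave byte (port 0xA1), and a set bit masks the line.

	#include <stdio.h>

	int main(void)
	{
		unsigned char cached_21 = 0xff;	/* master: IRQs 0-7, all masked  */
		unsigned char cached_A1 = 0xff;	/* slave: IRQs 8-15, all masked  */
		int irq = 10;			/* slave line 2 */

		cached_A1 &= ~(1 << (irq - 8));		/* unmask IRQ 10 */
		printf("A1 = 0x%02x\n", cached_A1);	/* 0xfb */

		cached_A1 |= 1 << (irq - 8);		/* mask it again */
		printf("A1 = 0x%02x\n", cached_A1);	/* 0xff */

		cached_21 &= ~(1 << 3);			/* unmask IRQ 3 on master */
		printf("21 = 0x%02x\n", cached_21);	/* 0xf7 */
		return 0;
	}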
diff --git a/arch/ppc64/kernel/i8259.h b/arch/ppc64/kernel/i8259.h
deleted file mode 100644
index f74764ba0bfa..000000000000
--- a/arch/ppc64/kernel/i8259.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef _PPC_KERNEL_i8259_H
10#define _PPC_KERNEL_i8259_H
11
12extern struct hw_interrupt_type i8259_pic;
13
14extern void i8259_init(int offset);
15extern int i8259_irq(int);
16
17#endif /* _PPC_KERNEL_i8259_H */
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/ppc64/kernel/iSeries_VpdInfo.c
deleted file mode 100644
index 5d921792571f..000000000000
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001.
3 *
4 * This code gets the card location of the hardware
5 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp>
6 * Copyright (C) 2005 Stephen Rothwell, IBM Corp
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the:
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330,
22 * Boston, MA 02111-1307 USA
23 *
24 * Change Activity:
25 * Created, Feb 2, 2001
26 * Ported to ppc64, August 20, 2001
27 * End Change Activity
28 */
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <asm/types.h>
33#include <asm/resource.h>
34
35#include <asm/iSeries/HvCallPci.h>
36#include <asm/iSeries/HvTypes.h>
37#include <asm/iSeries/iSeries_pci.h>
38
39/*
40 * Size of Bus VPD data
41 */
42#define BUS_VPDSIZE 1024
43
44/*
45 * Bus Vpd Tags
46 */
47#define VpdEndOfAreaTag 0x79
48#define VpdIdStringTag 0x82
49#define VpdVendorAreaTag 0x84
50
51/*
52 * Mfg Area Tags
53 */
54#define VpdFruFrameId 0x4649 // "FI"
55#define VpdSlotMapFormat 0x4D46 // "MF"
56#define VpdSlotMap 0x534D // "SM"
57
58/*
59 * Structures of the areas
60 */
61struct MfgVpdAreaStruct {
62 u16 Tag;
63 u8 TagLength;
64 u8 AreaData1;
65 u8 AreaData2;
66};
67typedef struct MfgVpdAreaStruct MfgArea;
68#define MFG_ENTRY_SIZE 3
69
70struct SlotMapStruct {
71 u8 AgentId;
72 u8 SecondaryAgentId;
73 u8 PhbId;
74 char CardLocation[3];
75 char Parms[8];
76 char Reserved[2];
77};
78typedef struct SlotMapStruct SlotMap;
79#define SLOT_ENTRY_SIZE 16
80
81/*
82 * Parse the Slot Area
83 */
84static void __init iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen,
85 HvAgentId agent, u8 *PhbId, char card[4])
86{
87 int SlotMapLen = MapLen;
88 SlotMap *SlotMapPtr = MapPtr;
89
90 /*
91 * Parse Slot label until we find the one requested
92 */
93 while (SlotMapLen > 0) {
94 if (SlotMapPtr->AgentId == agent) {
95 /*
96 * If the PHB wasn't found yet, take it from the first entry found.
97 */
98 if (*PhbId == 0xff)
99 *PhbId = SlotMapPtr->PhbId;
100 /* Found it, extract the data. */
101 if (SlotMapPtr->PhbId == *PhbId) {
102 memcpy(card, &SlotMapPtr->CardLocation, 3);
103 card[3] = 0;
104 break;
105 }
106 }
107 /* Point to the next Slot */
108 SlotMapPtr = (SlotMap *)((char *)SlotMapPtr + SLOT_ENTRY_SIZE);
109 SlotMapLen -= SLOT_ENTRY_SIZE;
110 }
111}
112
113/*
114 * Parse the Mfg Area
115 */
116static void __init iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen,
117 HvAgentId agent, u8 *PhbId,
118 u8 *frame, char card[4])
119{
120 MfgArea *MfgAreaPtr = (MfgArea *)AreaData;
121 int MfgAreaLen = AreaLen;
122 u16 SlotMapFmt = 0;
123
124 /* Parse Mfg Data */
125 while (MfgAreaLen > 0) {
126 int MfgTagLen = MfgAreaPtr->TagLength;
127 /* Frame ID (FI 4649020310 ) */
128 if (MfgAreaPtr->Tag == VpdFruFrameId) /* FI */
129 *frame = MfgAreaPtr->AreaData1;
130 /* Slot Map Format (MF 4D46020004 ) */
131 else if (MfgAreaPtr->Tag == VpdSlotMapFormat) /* MF */
132 SlotMapFmt = (MfgAreaPtr->AreaData1 * 256)
133 + MfgAreaPtr->AreaData2;
134 /* Slot Map (SM 534D90 */
135 else if (MfgAreaPtr->Tag == VpdSlotMap) { /* SM */
136 SlotMap *SlotMapPtr;
137
138 if (SlotMapFmt == 0x1004)
139 SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
140 + MFG_ENTRY_SIZE + 1);
141 else
142 SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
143 + MFG_ENTRY_SIZE);
144 iSeries_Parse_SlotArea(SlotMapPtr, MfgTagLen,
145 agent, PhbId, card);
146 }
147 /*
148 * Point to the next Mfg Area
149 * Use the defined size; sizeof gives the wrong answer
150 */
151 MfgAreaPtr = (MfgArea *)((char *)MfgAreaPtr + MfgTagLen
152 + MFG_ENTRY_SIZE);
153 MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE);
154 }
155}
156
157/*
158 * Look for "BUS" - the data is not NUL-terminated.
159 * PHBID of 0xFF indicates PHB was not found in VPD Data.
160 */
161static int __init iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength)
162{
163 u8 *PhbPtr = AreaPtr;
164 int DataLen = AreaLength;
165 char PhbId = 0xFF;
166
167 while (DataLen > 0) {
168 if ((*PhbPtr == 'B') && (*(PhbPtr + 1) == 'U')
169 && (*(PhbPtr + 2) == 'S')) {
170 PhbPtr += 3;
171 while (*PhbPtr == ' ')
172 ++PhbPtr;
173 PhbId = (*PhbPtr & 0x0F);
174 break;
175 }
176 ++PhbPtr;
177 --DataLen;
178 }
179 return PhbId;
180}
181
182/*
183 * Parse out the VPD Areas
184 */
185static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen,
186 HvAgentId agent, u8 *frame, char card[4])
187{
188 u8 *TagPtr = VpdData;
189 int DataLen = VpdDataLen - 3;
190 u8 PhbId;
191
192 while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) {
193 int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256);
194 u8 *AreaData = TagPtr + 3;
195
196 if (*TagPtr == VpdIdStringTag)
197 PhbId = iSeries_Parse_PhbId(AreaData, AreaLen);
198 else if (*TagPtr == VpdVendorAreaTag)
199 iSeries_Parse_MfgArea(AreaData, AreaLen,
200 agent, &PhbId, frame, card);
201 /* Point to next Area. */
202 TagPtr = AreaData + AreaLen;
203 DataLen -= AreaLen;
204 }
205}
206
207static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
208 u8 *frame, char card[4])
209{
210 int BusVpdLen = 0;
211 u8 *BusVpdPtr = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
212
213 if (BusVpdPtr == NULL) {
214 printk("PCI: Bus VPD Buffer allocation failure.\n");
215 return;
216 }
217 BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr),
218 BUS_VPDSIZE);
219 if (BusVpdLen == 0) {
220 printk("PCI: Bus VPD Buffer zero length.\n");
221 goto out_free;
222 }
223 /* printk("PCI: BusVpdPtr: %p, %d\n",BusVpdPtr, BusVpdLen); */
224 /* Make sure this is what I think it is */
225 if (*BusVpdPtr != VpdIdStringTag) { /* 0x82 */
226 printk("PCI: Bus VPD Buffer missing starting tag.\n");
227 goto out_free;
228 }
229 iSeries_Parse_Vpd(BusVpdPtr, BusVpdLen, agent, frame, card);
230out_free:
231 kfree(BusVpdPtr);
232}
233
234/*
235 * Prints the device information.
236 * - Pass in pci_dev* pointer to the device.
237 * - Pass in the device count
238 *
239 * Format:
240 * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
241 * controller
242 */
243void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
244{
245 struct iSeries_Device_Node *DevNode = PciDev->sysdata;
246 u16 bus;
247 u8 frame;
248 char card[4];
249 HvSubBusNumber subbus;
250 HvAgentId agent;
251
252 if (DevNode == NULL) {
253 printk("%d. PCI: iSeries_Device_Information DevNode is NULL\n",
254 count);
255 return;
256 }
257
258 bus = ISERIES_BUS(DevNode);
259 subbus = ISERIES_SUBBUS(DevNode);
260 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
261 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
262 iSeries_Get_Location_Code(bus, agent, &frame, card);
263
264 printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ",
265 count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor,
266 frame, card);
267 printk("0x%04X\n", (int)(PciDev->class >> 8));
268}
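The parsing functions in the file above walk a simple tag/length/value layout: each area is a one-byte tag, a 16-bit little-endian length, then that many data bytes, terminated by the 0x79 end tag. A minimal standalone sketch of that walk (sample data made up; it mirrors the accounting in iSeries_Parse_Vpd(), which decrements only the data length):

	#include <stdio.h>

	#define VPD_END_TAG    0x79
	#define VPD_ID_TAG     0x82
	#define VPD_VENDOR_TAG 0x84

	int main(void)
	{
		unsigned char vpd[] = {
			VPD_ID_TAG,     3, 0, 'B', 'U', 'S',	/* id string area */
			VPD_VENDOR_TAG, 2, 0, 0x46, 0x49,	/* vendor area */
			VPD_END_TAG
		};
		unsigned char *p = vpd;
		int left = (int)sizeof(vpd) - 3;

		while (*p != VPD_END_TAG && left > 0) {
			int len = p[1] + p[2] * 256;	/* little-endian length */

			printf("tag 0x%02x, %d data byte(s)\n", (unsigned)*p, len);
			p += 3 + len;			/* skip header and data */
			left -= len;
		}
		return 0;
	}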
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
deleted file mode 100644
index 073b76661747..000000000000
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ /dev/null
@@ -1,236 +0,0 @@
1/*
2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c
4 *
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/machdep.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17#include <asm/iSeries/HvCallHpt.h>
18#include <asm/abs_addr.h>
19#include <linux/spinlock.h>
20
21static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
22
23/*
24 * Very primitive algorithm for picking up a lock
25 */
26static inline void iSeries_hlock(unsigned long slot)
27{
28 if (slot & 0x8)
29 slot = ~slot;
30 spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
31}
32
33static inline void iSeries_hunlock(unsigned long slot)
34{
35 if (slot & 0x8)
36 slot = ~slot;
37 spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
38}
39
40static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
41 unsigned long prpn, unsigned long vflags,
42 unsigned long rflags)
43{
44 unsigned long arpn;
45 long slot;
46 hpte_t lhpte;
47 int secondary = 0;
48
49 /*
50 * The hypervisor tries both primary and secondary.
51 * If we are being called to insert in the secondary,
52 * it means we have already tried both primary and secondary,
53 * so we return failure immediately.
54 */
55 if (vflags & HPTE_V_SECONDARY)
56 return -1;
57
58 iSeries_hlock(hpte_group);
59
60 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
61 BUG_ON(lhpte.v & HPTE_V_VALID);
62
63 if (slot == -1) { /* No available entry found in either group */
64 iSeries_hunlock(hpte_group);
65 return -1;
66 }
67
68 if (slot < 0) { /* MSB set means secondary group */
69 vflags |= HPTE_V_SECONDARY;
70 secondary = 1;
71 slot &= 0x7fffffffffffffff;
72 }
73
74 arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
75
76 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
77 lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
78
79 /* Now fill in the actual HPTE */
80 HvCallHpt_addValidate(slot, secondary, &lhpte);
81
82 iSeries_hunlock(hpte_group);
83
84 return (secondary << 3) | (slot & 7);
85}
86
87static unsigned long iSeries_hpte_getword0(unsigned long slot)
88{
89 hpte_t hpte;
90
91 HvCallHpt_get(&hpte, slot);
92 return hpte.v;
93}
94
95static long iSeries_hpte_remove(unsigned long hpte_group)
96{
97 unsigned long slot_offset;
98 int i;
99 unsigned long hpte_v;
100
101 /* Pick a random slot to start at */
102 slot_offset = mftb() & 0x7;
103
104 iSeries_hlock(hpte_group);
105
106 for (i = 0; i < HPTES_PER_GROUP; i++) {
107 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
108
109 if (! (hpte_v & HPTE_V_BOLTED)) {
110 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
111 slot_offset, 0, 0);
112 iSeries_hunlock(hpte_group);
113 return i;
114 }
115
116 slot_offset++;
117 slot_offset &= 0x7;
118 }
119
120 iSeries_hunlock(hpte_group);
121
122 return -1;
123}
124
125/*
126 * The HyperVisor expects the "flags" argument in this form:
127 * bits 0..59 : reserved
128 * bit 60 : N
129 * bits 61..63 : PP2,PP1,PP0
130 */
131static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
132 unsigned long va, int large, int local)
133{
134 hpte_t hpte;
135 unsigned long avpn = va >> 23;
136
137 iSeries_hlock(slot);
138
139 HvCallHpt_get(&hpte, slot);
140 if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
141 /*
142 * Hypervisor expects bits as NPPP, which is
143 * different from how they are mapped in our PP.
144 */
145 HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
146 iSeries_hunlock(slot);
147 return 0;
148 }
149 iSeries_hunlock(slot);
150
151 return -1;
152}
153
154/*
155 * Functions used to find the PTE for a particular virtual address.
156 * Only used during boot when bolting pages.
157 *
158 * Input : vpn : virtual page number
159 * Output: PTE index within the page table of the entry
160 * -1 on failure
161 */
162static long iSeries_hpte_find(unsigned long vpn)
163{
164 hpte_t hpte;
165 long slot;
166
167 /*
168 * The HvCallHpt_findValid interface is as follows:
169 * 0xffffffffffffffff : No entry found.
170 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
171 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
172 */
173 slot = HvCallHpt_findValid(&hpte, vpn);
174 if (hpte.v & HPTE_V_VALID) {
175 if (slot < 0) {
176 slot &= 0x7fffffffffffffff;
177 slot = -slot;
178 }
179 } else
180 slot = -1;
181 return slot;
182}
183
184/*
185 * Update the page protection bits. Intended to be used to create
186 * guard pages for kernel data structures on pages which are bolted
187 * in the HPT. Assumes pages being operated on will not be stolen.
188 * Does not work on large pages.
189 *
190 * No need to lock here because we should be the only user.
191 */
192static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
193{
194 unsigned long vsid,va,vpn;
195 long slot;
196
197 vsid = get_kernel_vsid(ea);
198 va = (vsid << 28) | (ea & 0x0fffffff);
199 vpn = va >> PAGE_SHIFT;
200 slot = iSeries_hpte_find(vpn);
201 if (slot == -1)
202 panic("updateboltedpp: Could not find page to bolt\n");
203 HvCallHpt_setPp(slot, newpp);
204}
205
206static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
207 int large, int local)
208{
209 unsigned long hpte_v;
210 unsigned long avpn = va >> 23;
211 unsigned long flags;
212
213 local_irq_save(flags);
214
215 iSeries_hlock(slot);
216
217 hpte_v = iSeries_hpte_getword0(slot);
218
219 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
220 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
221
222 iSeries_hunlock(slot);
223
224 local_irq_restore(flags);
225}
226
227void hpte_init_iSeries(void)
228{
229 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
230 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
231 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
232 ppc_md.hpte_insert = iSeries_hpte_insert;
233 ppc_md.hpte_remove = iSeries_hpte_remove;
234
235 htab_finish_init();
236}
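The HvCallHpt_findValid() return encoding documented in the comment above (all ones for no entry, top bit for the secondary group, low bits for the slot) decodes as in this standalone sketch; the sample values are made up:

	#include <stdio.h>

	int main(void)
	{
		long long slots[] = {
			-1LL,				 /* no entry found  */
			0x12LL,				 /* primary group   */
			(long long)0x8000000000000012ULL /* secondary group */
		};
		int i;

		for (i = 0; i < 3; i++) {
			long long s = slots[i];

			if (s == -1)
				printf("no entry\n");
			else if (s < 0)	/* MSB set: secondary group */
				printf("secondary, slot 0x%llx\n",
				       s & 0x7fffffffffffffffLL);
			else
				printf("primary, slot 0x%llx\n", s);
		}
		return 0;
	}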
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/ppc64/kernel/iSeries_iommu.c
deleted file mode 100644
index f8ff1bb054dc..000000000000
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * arch/ppc64/kernel/iSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 *
6 * Rewrite, cleanup:
7 *
8 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
9 *
10 * Dynamic DMA mapping support, iSeries-specific parts.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/types.h>
29#include <linux/dma-mapping.h>
30#include <linux/list.h>
31
32#include <asm/iommu.h>
33#include <asm/machdep.h>
34#include <asm/iSeries/HvCallXm.h>
35#include <asm/iSeries/iSeries_pci.h>
36
37extern struct list_head iSeries_Global_Device_List;
38
39
40static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
41 unsigned long uaddr, enum dma_data_direction direction)
42{
43 u64 rc;
44 union tce_entry tce;
45
46 while (npages--) {
47 tce.te_word = 0;
48 tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
49
50 if (tbl->it_type == TCE_VB) {
51 /* Virtual Bus */
52 tce.te_bits.tb_valid = 1;
53 tce.te_bits.tb_allio = 1;
54 if (direction != DMA_TO_DEVICE)
55 tce.te_bits.tb_rdwr = 1;
56 } else {
57 /* PCI Bus */
58 tce.te_bits.tb_rdwr = 1; /* Read allowed */
59 if (direction != DMA_TO_DEVICE)
60 tce.te_bits.tb_pciwr = 1;
61 }
62
63 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index,
64 tce.te_word);
65 if (rc)
66 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
67 rc);
68 index++;
69 uaddr += PAGE_SIZE;
70 }
71}
72
73static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
74{
75 u64 rc;
76
77 while (npages--) {
78 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
79 if (rc)
80 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
81 rc);
82 index++;
83 }
84}
85
86#ifdef CONFIG_PCI
87/*
88 * This function compares the known tables to find an iommu_table
89 * that has already been built for hardware TCEs.
90 */
91static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
92{
93 struct iSeries_Device_Node *dp;
94
95 list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) {
96 if ((dp->iommu_table != NULL) &&
97 (dp->iommu_table->it_type == TCE_PCI) &&
98 (dp->iommu_table->it_offset == tbl->it_offset) &&
99 (dp->iommu_table->it_index == tbl->it_index) &&
100 (dp->iommu_table->it_size == tbl->it_size))
101 return dp->iommu_table;
102 }
103 return NULL;
104}
105
106/*
107 * Call Hv with the architected data structure to get TCE table
108 * info. Put the returned data into the Linux representation of the
109 * TCE table data.
110 * The Hardware Tce table comes in three flavors.
111 * 1. TCE table shared between Buses.
112 * 2. TCE table per Bus.
113 * 3. TCE Table per IOA.
114 */
115static void iommu_table_getparms(struct iSeries_Device_Node* dn,
116 struct iommu_table* tbl)
117{
118 struct iommu_table_cb *parms;
119
120 parms = kmalloc(sizeof(*parms), GFP_KERNEL);
121 if (parms == NULL)
122 panic("PCI_DMA: TCE Table Allocation failed.");
123
124 memset(parms, 0, sizeof(*parms));
125
126 parms->itc_busno = ISERIES_BUS(dn);
127 parms->itc_slotno = dn->LogicalSlot;
128 parms->itc_virtbus = 0;
129
130 HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms));
131
132 if (parms->itc_size == 0)
133 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
134
135 /* itc_size is in pages worth of table, it_size is in # of entries */
136 tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry);
137 tbl->it_busno = parms->itc_busno;
138 tbl->it_offset = parms->itc_offset;
139 tbl->it_index = parms->itc_index;
140 tbl->it_blocksize = 1;
141 tbl->it_type = TCE_PCI;
142
143 kfree(parms);
144}
145
146
147void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn)
148{
149 struct iommu_table *tbl;
150
151 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
152
153 iommu_table_getparms(dn, tbl);
154
155 /* Look for existing tce table */
156 dn->iommu_table = iommu_table_find(tbl);
157 if (dn->iommu_table == NULL)
158 dn->iommu_table = iommu_init_table(tbl);
159 else
160 kfree(tbl);
161}
162#endif
163
164static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
165static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
166
167void iommu_init_early_iSeries(void)
168{
169 ppc_md.tce_build = tce_build_iSeries;
170 ppc_md.tce_free = tce_free_iSeries;
171
172 ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
173 ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
174
175 pci_iommu_init();
176}
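As a sanity check on the it_size computation in iommu_table_getparms() above (itc_size is in pages worth of table, it_size in entries), here is the arithmetic as a standalone sketch, assuming 4 KiB pages and 8-byte (64-bit) TCE entries:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
		unsigned long tce_size = 8;	/* assumed sizeof(union tce_entry) */
		unsigned long itc_size = 2;	/* pages worth of table (example) */

		unsigned long entries = itc_size * page_size / tce_size;

		printf("%lu entries mapping %lu MiB of DMA space\n",
		       entries, entries * page_size >> 20);	/* 1024, 4 MiB */
		return 0;
	}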
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/ppc64/kernel/iSeries_irq.c
deleted file mode 100644
index 77376c1bd611..000000000000
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/*
2 * This module supports the iSeries PCI bus interrupt handling
3 * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
4 * Copyright (C) 2004-2005 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the:
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330,
20 * Boston, MA 02111-1307 USA
21 *
22 * Change Activity:
23 * Created, December 13, 2000 by Wayne Holm
24 * End Change Activity
25 */
26#include <linux/config.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/threads.h>
30#include <linux/smp.h>
31#include <linux/param.h>
32#include <linux/string.h>
33#include <linux/bootmem.h>
34#include <linux/ide.h>
35#include <linux/irq.h>
36#include <linux/spinlock.h>
37
38#include <asm/ppcdebug.h>
39#include <asm/iSeries/HvTypes.h>
40#include <asm/iSeries/HvLpEvent.h>
41#include <asm/iSeries/HvCallPci.h>
42#include <asm/iSeries/HvCallXm.h>
43#include <asm/iSeries/iSeries_irq.h>
44
45/* This maps virtual irq numbers to real irqs */
46unsigned int virt_irq_to_real_map[NR_IRQS];
47
48/* The next available virtual irq number */
49/* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */
50static int next_virtual_irq = 2;
51
52static long Pci_Interrupt_Count;
53static long Pci_Event_Count;
54
55enum XmPciLpEvent_Subtype {
56 XmPciLpEvent_BusCreated = 0, // PHB has been created
57 XmPciLpEvent_BusError = 1, // PHB has failed
58 XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus
59 XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed
60 XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered
61 XmPciLpEvent_BusRecovered = 12, // PHB has been recovered
62	XmPciLpEvent_UnQuiesceBus = 18,	// Secondary bus unquiescing
63 XmPciLpEvent_BridgeError = 21, // Bridge Error
64 XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt
65};
66
67struct XmPciLpEvent_BusInterrupt {
68 HvBusNumber busNumber;
69 HvSubBusNumber subBusNumber;
70};
71
72struct XmPciLpEvent_NodeInterrupt {
73 HvBusNumber busNumber;
74 HvSubBusNumber subBusNumber;
75 HvAgentId deviceId;
76};
77
78struct XmPciLpEvent {
79 struct HvLpEvent hvLpEvent;
80
81 union {
82 u64 alignData; // Align on an 8-byte boundary
83
84 struct {
85 u32 fisr;
86 HvBusNumber busNumber;
87 HvSubBusNumber subBusNumber;
88 HvAgentId deviceId;
89 } slotInterrupt;
90
91 struct XmPciLpEvent_BusInterrupt busFailed;
92 struct XmPciLpEvent_BusInterrupt busRecovered;
93 struct XmPciLpEvent_BusInterrupt busCreated;
94
95 struct XmPciLpEvent_NodeInterrupt nodeFailed;
96 struct XmPciLpEvent_NodeInterrupt nodeRecovered;
97
98 } eventData;
99
100};
101
102static void intReceived(struct XmPciLpEvent *eventParm,
103 struct pt_regs *regsParm)
104{
105 int irq;
106
107 ++Pci_Interrupt_Count;
108
109 switch (eventParm->hvLpEvent.xSubtype) {
110 case XmPciLpEvent_SlotInterrupt:
111 irq = eventParm->hvLpEvent.xCorrelationToken;
112 /* Dispatch the interrupt handlers for this irq */
113 ppc_irq_dispatch_handler(regsParm, irq);
114 HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
115 eventParm->eventData.slotInterrupt.subBusNumber,
116 eventParm->eventData.slotInterrupt.deviceId);
117 break;
118 /* Ignore error recovery events for now */
119 case XmPciLpEvent_BusCreated:
120 printk(KERN_INFO "intReceived: system bus %d created\n",
121 eventParm->eventData.busCreated.busNumber);
122 break;
123 case XmPciLpEvent_BusError:
124 case XmPciLpEvent_BusFailed:
125 printk(KERN_INFO "intReceived: system bus %d failed\n",
126 eventParm->eventData.busFailed.busNumber);
127 break;
128 case XmPciLpEvent_BusRecovered:
129 case XmPciLpEvent_UnQuiesceBus:
130 printk(KERN_INFO "intReceived: system bus %d recovered\n",
131 eventParm->eventData.busRecovered.busNumber);
132 break;
133 case XmPciLpEvent_NodeFailed:
134 case XmPciLpEvent_BridgeError:
135 printk(KERN_INFO
136 "intReceived: multi-adapter bridge %d/%d/%d failed\n",
137 eventParm->eventData.nodeFailed.busNumber,
138 eventParm->eventData.nodeFailed.subBusNumber,
139 eventParm->eventData.nodeFailed.deviceId);
140 break;
141 case XmPciLpEvent_NodeRecovered:
142 printk(KERN_INFO
143 "intReceived: multi-adapter bridge %d/%d/%d recovered\n",
144 eventParm->eventData.nodeRecovered.busNumber,
145 eventParm->eventData.nodeRecovered.subBusNumber,
146 eventParm->eventData.nodeRecovered.deviceId);
147 break;
148 default:
149 printk(KERN_ERR
150 "intReceived: unrecognized event subtype 0x%x\n",
151 eventParm->hvLpEvent.xSubtype);
152 break;
153 }
154}
155
156static void XmPciLpEvent_handler(struct HvLpEvent *eventParm,
157 struct pt_regs *regsParm)
158{
159#ifdef CONFIG_PCI
160 ++Pci_Event_Count;
161
162 if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) {
163 switch (eventParm->xFlags.xFunction) {
164 case HvLpEvent_Function_Int:
165 intReceived((struct XmPciLpEvent *)eventParm, regsParm);
166 break;
167 case HvLpEvent_Function_Ack:
168 printk(KERN_ERR
169 "XmPciLpEvent_handler: unexpected ack received\n");
170 break;
171 default:
172 printk(KERN_ERR
173 "XmPciLpEvent_handler: unexpected event function %d\n",
174 (int)eventParm->xFlags.xFunction);
175 break;
176 }
177 } else if (eventParm)
178 printk(KERN_ERR
179 "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n",
180 (int)eventParm->xType);
181 else
182 printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n");
183#endif
184}
185
186/*
187 * This is called by init_IRQ, set in ppc_md.init_IRQ by iSeries_setup.c.
188 * It must be called before the bus walk.
189 */
190void __init iSeries_init_IRQ(void)
191{
192 /* Register PCI event handler and open an event path */
193 int xRc;
194
195 xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
196 &XmPciLpEvent_handler);
197 if (xRc == 0) {
198 xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
199 if (xRc != 0)
200 printk(KERN_ERR "iSeries_init_IRQ: open event path "
201 "failed with rc 0x%x\n", xRc);
202 } else
203 printk(KERN_ERR "iSeries_init_IRQ: register handler "
204 "failed with rc 0x%x\n", xRc);
205}
206
207#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
208#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
209#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
210
211/*
212 * This will be called by device drivers (via enable_IRQ)
213 * to enable INTA in the bridge interrupt status register.
214 */
215static void iSeries_enable_IRQ(unsigned int irq)
216{
217 u32 bus, deviceId, function, mask;
218 const u32 subBus = 0;
219 unsigned int rirq = virt_irq_to_real_map[irq];
220
221 /* The IRQ has already been locked by the caller */
222 bus = REAL_IRQ_TO_BUS(rirq);
223 function = REAL_IRQ_TO_FUNC(rirq);
224 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
225
226 /* Unmask secondary INTA */
227 mask = 0x80000000;
228 HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
229 PPCDBG(PPCDBG_BUSWALK, "iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
230 bus, subBus, deviceId, irq);
231}
232
233/* This is called by iSeries_activate_IRQs */
234static unsigned int iSeries_startup_IRQ(unsigned int irq)
235{
236 u32 bus, deviceId, function, mask;
237 const u32 subBus = 0;
238 unsigned int rirq = virt_irq_to_real_map[irq];
239
240 bus = REAL_IRQ_TO_BUS(rirq);
241 function = REAL_IRQ_TO_FUNC(rirq);
242 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
243
244 /* Link the IRQ number to the bridge */
245 HvCallXm_connectBusUnit(bus, subBus, deviceId, irq);
246
247 /* Unmask bridge interrupts in the FISR */
248 mask = 0x01010000 << function;
249 HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
250 iSeries_enable_IRQ(irq);
251 return 0;
252}
253
254/*
255 * This is called out of iSeries_fixup to activate interrupt
256 * generation for usable slots
257 */
258void __init iSeries_activate_IRQs()
259{
260 int irq;
261 unsigned long flags;
262
263 for_each_irq (irq) {
264 irq_desc_t *desc = get_irq_desc(irq);
265
266 if (desc && desc->handler && desc->handler->startup) {
267 spin_lock_irqsave(&desc->lock, flags);
268 desc->handler->startup(irq);
269 spin_unlock_irqrestore(&desc->lock, flags);
270 }
271 }
272}
273
274/* this is not called anywhere currently */
275static void iSeries_shutdown_IRQ(unsigned int irq)
276{
277 u32 bus, deviceId, function, mask;
278 const u32 subBus = 0;
279 unsigned int rirq = virt_irq_to_real_map[irq];
280
281 /* irq should be locked by the caller */
282 bus = REAL_IRQ_TO_BUS(rirq);
283 function = REAL_IRQ_TO_FUNC(rirq);
284 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
285
286 /* Invalidate the IRQ number in the bridge */
287 HvCallXm_connectBusUnit(bus, subBus, deviceId, 0);
288
289 /* Mask bridge interrupts in the FISR */
290 mask = 0x01010000 << function;
291 HvCallPci_maskFisr(bus, subBus, deviceId, mask);
292}
293
294/*
295 * This will be called by device drivers (via disable_IRQ)
296 * to disable INTA in the bridge interrupt status register.
297 */
298static void iSeries_disable_IRQ(unsigned int irq)
299{
300 u32 bus, deviceId, function, mask;
301 const u32 subBus = 0;
302 unsigned int rirq = virt_irq_to_real_map[irq];
303
304 /* The IRQ has already been locked by the caller */
305 bus = REAL_IRQ_TO_BUS(rirq);
306 function = REAL_IRQ_TO_FUNC(rirq);
307 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
308
309 /* Mask secondary INTA */
310 mask = 0x80000000;
311 HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
312 PPCDBG(PPCDBG_BUSWALK, "iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
313 bus, subBus, deviceId, irq);
314}
315
316/*
317 * Need to define this so ppc_irq_dispatch_handler will NOT call
318 * enable_IRQ at the end of interrupt handling. However, this does
319 * nothing because there is not enough information provided to do
320 * the EOI HvCall. This is done by XmPciLpEvent.c
321 */
322static void iSeries_end_IRQ(unsigned int irq)
323{
324}
325
326static hw_irq_controller iSeries_IRQ_handler = {
327 .typename = "iSeries irq controller",
328 .startup = iSeries_startup_IRQ,
329 .shutdown = iSeries_shutdown_IRQ,
330 .enable = iSeries_enable_IRQ,
331 .disable = iSeries_disable_IRQ,
332 .end = iSeries_end_IRQ
333};
334
335/*
336 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
337 * It calculates the irq value for the slot.
338 * Note that subBusNumber is always 0 (at the moment at least).
339 */
340int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
341 HvSubBusNumber subBusNumber, HvAgentId deviceId)
342{
343 unsigned int realirq, virtirq;
344 u8 idsel = (deviceId >> 4);
345 u8 function = deviceId & 7;
346
347 virtirq = next_virtual_irq++;
348 realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function;
349 virt_irq_to_real_map[virtirq] = realirq;
350
351 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
352 return virtirq;
353}
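The real-IRQ packing used by iSeries_allocate_IRQ() above is the inverse of the REAL_IRQ_TO_* macros earlier in the file: bus-1 in bits 6 and up, idsel-1 in bits 3-5, function in bits 0-2. A standalone round-trip check (slot values made up):

	#include <stdio.h>

	#define REAL_IRQ_TO_BUS(irq)   ((((irq) >> 6) & 0xff) + 1)
	#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
	#define REAL_IRQ_TO_FUNC(irq)  ((irq) & 7)

	int main(void)
	{
		unsigned int bus = 24, idsel = 5, func = 2;	/* example slot */
		unsigned int rirq = ((bus - 1) << 6) + ((idsel - 1) << 3) + func;

		printf("rirq=0x%x -> bus %u idsel %u func %u\n", rirq,
		       REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_IDSEL(rirq),
		       REAL_IRQ_TO_FUNC(rirq));	/* bus 24 idsel 5 func 2 */
		return 0;
	}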
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/ppc64/kernel/iSeries_pci.c
deleted file mode 100644
index fbc273c32bcc..000000000000
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ /dev/null
@@ -1,905 +0,0 @@
1/*
2 * iSeries_pci.c
3 *
4 * Copyright (C) 2001 Allan Trautman, IBM Corporation
5 *
6 * iSeries specific routines for PCI.
7 *
8 * Based on code from pci.c and iSeries_pci.c 32bit
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/string.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/ide.h>
30#include <linux/pci.h>
31
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/prom.h>
35#include <asm/machdep.h>
36#include <asm/pci-bridge.h>
37#include <asm/ppcdebug.h>
38#include <asm/iommu.h>
39
40#include <asm/iSeries/HvCallPci.h>
41#include <asm/iSeries/HvCallXm.h>
42#include <asm/iSeries/iSeries_irq.h>
43#include <asm/iSeries/iSeries_pci.h>
44#include <asm/iSeries/mf.h>
45
46#include "pci.h"
47
48extern unsigned long io_page_mask;
49
50/*
51 * Forward declares of prototypes.
52 */
53static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn);
54static void scan_PHB_slots(struct pci_controller *Phb);
55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
57
58LIST_HEAD(iSeries_Global_Device_List);
59
60static int DeviceCount;
61
62/* Counters and control flags. */
63static long Pci_Io_Read_Count;
64static long Pci_Io_Write_Count;
65#if 0
66static long Pci_Cfg_Read_Count;
67static long Pci_Cfg_Write_Count;
68#endif
69static long Pci_Error_Count;
70
71static int Pci_Retry_Max = 3; /* Only retry 3 times */
72static int Pci_Error_Flag = 1; /* Set Retry Error on. */
73
74static struct pci_ops iSeries_pci_ops;
75
76/*
77 * Table defines
78 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
79 */
80#define IOMM_TABLE_MAX_ENTRIES 1024
81#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
82#define BASE_IO_MEMORY 0xE000000000000000UL
83
84static unsigned long max_io_memory = 0xE000000000000000UL;
85static long current_iomm_table_entry;
86
87/*
88 * Lookup Tables.
89 */
90static struct iSeries_Device_Node **iomm_table;
91static u8 *iobar_table;
92
93/*
94 * Static and Global variables
95 */
96static char *pci_io_text = "iSeries PCI I/O";
97static DEFINE_SPINLOCK(iomm_table_lock);
98
99/*
100 * iomm_table_initialize
101 *
102 * Allocates and initializes the Address Translation Table and Bar
103 * Tables to get them ready for use. Must be called before any
104 * I/O space is handed out to the device BARs.
105 */
106static void iomm_table_initialize(void)
107{
108 spin_lock(&iomm_table_lock);
109 iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES,
110 GFP_KERNEL);
111 iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES,
112 GFP_KERNEL);
113 spin_unlock(&iomm_table_lock);
114 if ((iomm_table == NULL) || (iobar_table == NULL))
115 panic("PCI: I/O tables allocation failed.\n");
116}
117
118/*
119 * iomm_table_allocate_entry
120 *
121 * Adds a pci_dev entry to the address translation table.
122 *
123 * - Allocates the number of entries required in the table based on
124 * BAR size.
125 * - Allocates starting at BASE_IO_MEMORY and increases.
126 * - The size is rounded up to be a multiple of the entry size.
127 * - CurrentIndex is incremented to keep track of the last entry.
128 * - Builds the resource entry for allocated BARs.
129 */
130static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
131{
132 struct resource *bar_res = &dev->resource[bar_num];
133 long bar_size = pci_resource_len(dev, bar_num);
134
135 /*
136 * No space to allocate - quick exit, skip allocation.
137 */
138 if (bar_size == 0)
139 return;
140 /*
141 * Set Resource values.
142 */
143 spin_lock(&iomm_table_lock);
144 bar_res->name = pci_io_text;
145 bar_res->start =
146 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
147 bar_res->start += BASE_IO_MEMORY;
148 bar_res->end = bar_res->start + bar_size - 1;
149 /*
150 * Allocate the number of table entries needed for BAR.
151 */
152 while (bar_size > 0 ) {
153 iomm_table[current_iomm_table_entry] = dev->sysdata;
154 iobar_table[current_iomm_table_entry] = bar_num;
155 bar_size -= IOMM_TABLE_ENTRY_SIZE;
156 ++current_iomm_table_entry;
157 }
158 max_io_memory = BASE_IO_MEMORY +
159 (IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry);
160 spin_unlock(&iomm_table_lock);
161}
162
163/*
164 * allocate_device_bars
165 *
166 * - Allocates ALL pci_dev BARs and updates the resources with the
167 * BAR values. BARs with zero length are skipped.
168 * The HvCallPci_getBarParms is used to get the size of the BAR
169 * space. It calls iomm_table_allocate_entry to allocate
170 * each entry.
171 * - Loops through the BAR resources (0 - 5) and the ROM,
172 * which is resource (6).
173 */
174static void allocate_device_bars(struct pci_dev *dev)
175{
176 struct resource *bar_res;
177 int bar_num;
178
179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
180 bar_res = &dev->resource[bar_num];
181 iomm_table_allocate_entry(dev, bar_num);
182 }
183}
184
185/*
186 * Log error information to system console.
187 * Filter out the device not there errors.
188 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
189 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
190 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
191 */
192static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
193 int AgentId, int HvRc)
194{
195 if (HvRc == 0x0302)
196 return;
197 printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
198 Error_Text, Bus, SubBus, AgentId, HvRc);
199}
200
201/*
202 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
203 */
204static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
205 HvSubBusNumber SubBus, int AgentId, int Function)
206{
207 struct iSeries_Device_Node *node;
208
209 PPCDBG(PPCDBG_BUSWALK,
210 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
211 Bus, SubBus, AgentId, Function);
212
213 node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
214 if (node == NULL)
215 return NULL;
216
217 memset(node, 0, sizeof(struct iSeries_Device_Node));
218 list_add_tail(&node->Device_List, &iSeries_Global_Device_List);
219#if 0
220 node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32);
221#endif
222 node->DsaAddr.DsaAddr = 0;
223 node->DsaAddr.Dsa.busNumber = Bus;
224 node->DsaAddr.Dsa.subBusNumber = SubBus;
225 node->DsaAddr.Dsa.deviceId = 0x10;
226 node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
227 return node;
228}
229
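The DSA packing hinted at by the #if 0 comment in build_device_node() above (bus in bits 48-63, sub-bus in 40-47, device id in 32-39) works out as in this standalone sketch, using the 0x18.00.10 example values from the code:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bus = 0x18, subbus = 0x00, devid = 0x10;
		unsigned long long dsa = (bus << 48) | (subbus << 40)
				       | (devid << 32);

		printf("dsa = 0x%016llx\n", dsa);	/* 0x0018001000000000 */
		return 0;
	}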
230/*
231 * unsigned long __init find_and_init_phbs(void)
232 *
233 * Description:
234 * This function checks for all possible system PCI host bridges that connect
235 * PCI buses. The system hypervisor is queried as to the guest partition
236 * ownership status. A pci_controller is built for any bus which is partially
237 * owned or fully owned by this guest partition.
238 */
239unsigned long __init find_and_init_phbs(void)
240{
241 struct pci_controller *phb;
242 HvBusNumber bus;
243
244 PPCDBG(PPCDBG_BUSWALK, "find_and_init_phbs Entry\n");
245
246 /* Check all possible buses. */
247 for (bus = 0; bus < 256; bus++) {
248 int ret = HvCallXm_testBus(bus);
249 if (ret == 0) {
250 printk("bus %d appears to exist\n", bus);
251
252 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
253 if (phb == NULL)
254 return -ENOMEM;
255 pci_setup_pci_controller(phb);
256
257 phb->pci_mem_offset = phb->local_number = bus;
258 phb->first_busno = bus;
259 phb->last_busno = bus;
260 phb->ops = &iSeries_pci_ops;
261
262 PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",
263 phb, bus);
264
265 /* Find and connect the devices. */
266 scan_PHB_slots(phb);
267 }
268 /*
269 * Check for Unexpected Return code, a clue that something
270 * has gone wrong.
271 */
272 else if (ret != 0x0301)
273 printk(KERN_ERR "Unexpected Return on Probe(0x%04X): 0x%04X",
274 bus, ret);
275 }
276 return 0;
277}
278
279/*
280 * iSeries_pcibios_init
281 *
282 * Chance to initialize any structures or variables before the PCI bus walk.
283 */
284void iSeries_pcibios_init(void)
285{
286 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
287 iomm_table_initialize();
288 find_and_init_phbs();
289 io_page_mask = -1;
290 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
291}
292
293/*
294 * iSeries_pci_final_fixup(void)
295 */
296void __init iSeries_pci_final_fixup(void)
297{
298 struct pci_dev *pdev = NULL;
299 struct iSeries_Device_Node *node;
300 int DeviceCount = 0;
301
302 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
303
304 /* Fix up at the device node and pci_dev relationship */
305 mf_display_src(0xC9000100);
306
307 printk("pcibios_final_fixup\n");
308 for_each_pci_dev(pdev) {
309 node = find_Device_Node(pdev->bus->number, pdev->devfn);
310 printk("pci dev %p (%x.%x), node %p\n", pdev,
311 pdev->bus->number, pdev->devfn, node);
312
313 if (node != NULL) {
314 ++DeviceCount;
315 pdev->sysdata = (void *)node;
316 node->PciDev = pdev;
317 PPCDBG(PPCDBG_BUSWALK,
318 "pdev 0x%p <==> DevNode 0x%p\n",
319 pdev, node);
320 allocate_device_bars(pdev);
321 iSeries_Device_Information(pdev, DeviceCount);
322 iommu_devnode_init_iSeries(node);
323			pdev->irq = node->Irq;
324		} else
325			printk("PCI: Device Tree not found for 0x%016lX\n",
326				(unsigned long)pdev);
327 }
328 iSeries_activate_IRQs();
329 mf_display_src(0xC9000200);
330}
331
332void pcibios_fixup_bus(struct pci_bus *PciBus)
333{
334 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
335 PciBus->number);
336}
337
338void pcibios_fixup_resources(struct pci_dev *pdev)
339{
340 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
341}
342
343/*
344 * Loop through each node function to find usable EADs bridges.
345 */
346static void scan_PHB_slots(struct pci_controller *Phb)
347{
348 struct HvCallPci_DeviceInfo *DevInfo;
349 HvBusNumber bus = Phb->local_number; /* System Bus */
350 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
351 int HvRc = 0;
352 int IdSel;
353 const int MaxAgents = 8;
354
355 DevInfo = (struct HvCallPci_DeviceInfo*)
356 kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
357 if (DevInfo == NULL)
358 return;
359
360 /*
361 * Probe for EADs Bridges
362 */
363 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
364 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
365 ISERIES_HV_ADDR(DevInfo),
366 sizeof(struct HvCallPci_DeviceInfo));
367 if (HvRc == 0) {
368 if (DevInfo->deviceType == HvCallPci_NodeDevice)
369 scan_EADS_bridge(bus, SubBus, IdSel);
370 else
371 printk("PCI: Invalid System Configuration(0x%02X)"
372 " for bus 0x%02x id 0x%02x.\n",
373 DevInfo->deviceType, bus, IdSel);
374 }
375 else
376 pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc);
377 }
378 kfree(DevInfo);
379}
380
381static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
382 int IdSel)
383{
384 struct HvCallPci_BridgeInfo *BridgeInfo;
385 HvAgentId AgentId;
386 int Function;
387 int HvRc;
388
389 BridgeInfo = (struct HvCallPci_BridgeInfo *)
390 kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
391 if (BridgeInfo == NULL)
392 return;
393
394	/* Note: hvSubBus and irq are always 0 at this level! */
395 for (Function = 0; Function < 8; ++Function) {
396 AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
397 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
398 if (HvRc == 0) {
399 printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
400 bus, IdSel, Function, AgentId);
401 /* Connect EADs: 0x18.00.12 = 0x00 */
402 PPCDBG(PPCDBG_BUSWALK,
403 "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
404 bus, SubBus, AgentId);
405 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
406 ISERIES_HV_ADDR(BridgeInfo),
407 sizeof(struct HvCallPci_BridgeInfo));
408 if (HvRc == 0) {
409 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
410 BridgeInfo->busUnitInfo.deviceType,
411 BridgeInfo->subBusNumber,
412 BridgeInfo->maxAgents,
413 BridgeInfo->maxSubBusNumber,
414 BridgeInfo->logicalSlotNumber);
415 PPCDBG(PPCDBG_BUSWALK,
416 "PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
417 BridgeInfo->busUnitInfo.deviceType,
418 BridgeInfo->subBusNumber,
419 BridgeInfo->maxAgents,
420 BridgeInfo->maxSubBusNumber,
421 BridgeInfo->logicalSlotNumber);
422
423 if (BridgeInfo->busUnitInfo.deviceType ==
424 HvCallPci_BridgeDevice) {
425 /* Scan_Bridge_Slot...: 0x18.00.12 */
426 scan_bridge_slot(bus, BridgeInfo);
427 } else
428 printk("PCI: Invalid Bridge Configuration(0x%02X)",
429 BridgeInfo->busUnitInfo.deviceType);
430 }
431 } else if (HvRc != 0x000B)
432 pci_Log_Error("EADs Connect",
433 bus, SubBus, AgentId, HvRc);
434 }
435 kfree(BridgeInfo);
436}
437
438/*
439 * This assumes that the node slot is always on the primary bus!
440 */
441static int scan_bridge_slot(HvBusNumber Bus,
442 struct HvCallPci_BridgeInfo *BridgeInfo)
443{
444 struct iSeries_Device_Node *node;
445 HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
446 u16 VendorId = 0;
447 int HvRc = 0;
448 u8 Irq = 0;
449 int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
450 int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
451 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
452
453 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
454 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
455 PPCDBG(PPCDBG_BUSWALK,
456 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
457 Bus, 0, EADsIdSel, Irq);
458
459 /*
460 * Connect all functions of any device found.
461 */
462 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
463 for (Function = 0; Function < 8; ++Function) {
464 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
465 HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
466 AgentId, Irq);
467 if (HvRc != 0) {
468 pci_Log_Error("Connect Bus Unit",
469 Bus, SubBus, AgentId, HvRc);
470 continue;
471 }
472
473 HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId,
474 PCI_VENDOR_ID, &VendorId);
475 if (HvRc != 0) {
476 pci_Log_Error("Read Vendor",
477 Bus, SubBus, AgentId, HvRc);
478 continue;
479 }
480 printk("read vendor ID: %x\n", VendorId);
481
482 /* FoundDevice: 0x18.28.10 = 0x12AE */
483 PPCDBG(PPCDBG_BUSWALK,
484 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
485 Bus, SubBus, AgentId, VendorId, Irq);
486 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
487 PCI_INTERRUPT_LINE, Irq);
488 if (HvRc != 0)
489 pci_Log_Error("PciCfgStore Irq Failed!",
490 Bus, SubBus, AgentId, HvRc);
491
492 ++DeviceCount;
493 node = build_device_node(Bus, SubBus, EADsIdSel, Function);
494 node->Irq = Irq;
495 node->LogicalSlot = BridgeInfo->logicalSlotNumber;
496
497 } /* for (Function = 0; Function < 8; ++Function) */
498 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
499 return HvRc;
500}
501
502/*
503 * I/O memory copies MUST use MMIO accessors on iSeries.
504 * TODO: for performance, include the HV call directly.
505 */
506void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count)
507{
508 u8 ByteValue = c;
509 long NumberOfBytes = Count;
510
511 while (NumberOfBytes > 0) {
512 iSeries_Write_Byte(ByteValue, dest++);
513 -- NumberOfBytes;
514 }
515}
516EXPORT_SYMBOL(iSeries_memset_io);
517
518void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count)
519{
520 char *src = source;
521 long NumberOfBytes = count;
522
523 while (NumberOfBytes > 0) {
524 iSeries_Write_Byte(*src++, dest++);
525 -- NumberOfBytes;
526 }
527}
528EXPORT_SYMBOL(iSeries_memcpy_toio);
529
530void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count)
531{
532 char *dst = dest;
533 long NumberOfBytes = count;
534
535 while (NumberOfBytes > 0) {
536 *dst++ = iSeries_Read_Byte(src++);
537 -- NumberOfBytes;
538 }
539}
540EXPORT_SYMBOL(iSeries_memcpy_fromio);
541
542/*
543 * Look down the chain to find the matching device node.
544 */
545static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
546{
547 struct list_head *pos;
548
549 list_for_each(pos, &iSeries_Global_Device_List) {
550 struct iSeries_Device_Node *node =
551 list_entry(pos, struct iSeries_Device_Node, Device_List);
552
553 if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
554 return node;
555 }
556 return NULL;
557}
558
559#if 0
560/*
561 * Returns the device node for the passed pci_dev
562 * Sanity Check Node PciDev to passed pci_dev
563 * If none is found, returns a NULL which the client must handle.
564 */
565static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev)
566{
567 struct iSeries_Device_Node *node;
568
569 node = pdev->sysdata;
570 if (node == NULL || node->PciDev != pdev)
571 node = find_Device_Node(pdev->bus->number, pdev->devfn);
572 return node;
573}
574#endif
575
576/*
577 * Config space read and write functions.
578 * For now at least, we look for the device node for the bus and devfn
579 * that we are asked to access. It may be possible to translate the devfn
580 * to a subbus and deviceid more directly.
581 */
582static u64 hv_cfg_read_func[4] = {
583 HvCallPciConfigLoad8, HvCallPciConfigLoad16,
584 HvCallPciConfigLoad32, HvCallPciConfigLoad32
585};
586
587static u64 hv_cfg_write_func[4] = {
588 HvCallPciConfigStore8, HvCallPciConfigStore16,
589 HvCallPciConfigStore32, HvCallPciConfigStore32
590};
591
592/*
593 * Read PCI config space
594 */
595static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
596 int offset, int size, u32 *val)
597{
598 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
599 u64 fn;
600 struct HvCallPci_LoadReturn ret;
601
602 if (node == NULL)
603 return PCIBIOS_DEVICE_NOT_FOUND;
604 if (offset > 255) {
605 *val = ~0;
606 return PCIBIOS_BAD_REGISTER_NUMBER;
607 }
608
609 fn = hv_cfg_read_func[(size - 1) & 3];
610 HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0);
611
612 if (ret.rc != 0) {
613 *val = ~0;
614 return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
615 }
616
617 *val = ret.value;
618 return 0;
619}
620
621/*
622 * Write PCI config space
623 */
624
625static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
626 int offset, int size, u32 val)
627{
628 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
629 u64 fn;
630 u64 ret;
631
632 if (node == NULL)
633 return PCIBIOS_DEVICE_NOT_FOUND;
634 if (offset > 255)
635 return PCIBIOS_BAD_REGISTER_NUMBER;
636
637 fn = hv_cfg_write_func[(size - 1) & 3];
638 ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0);
639
640 if (ret != 0)
641 return PCIBIOS_DEVICE_NOT_FOUND;
642
643 return 0;
644}
645
646static struct pci_ops iSeries_pci_ops = {
647 .read = iSeries_pci_read_config,
648 .write = iSeries_pci_write_config
649};
650
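The (size - 1) & 3 index used with hv_cfg_read_func[]/hv_cfg_write_func[] above maps the three legal PCI config access sizes onto table slots, which is why the 32-bit call appears twice (slots 2 and 3). A quick standalone check:

	#include <stdio.h>

	int main(void)
	{
		int sizes[] = { 1, 2, 4 };
		int i;

		for (i = 0; i < 3; i++)
			printf("size %d -> slot %d\n",
			       sizes[i], (sizes[i] - 1) & 3);
		/* size 1 -> 0, size 2 -> 1, size 4 -> 3 */
		return 0;
	}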
651/*
652 * Check Return Code
653 * -> On Failure, print and log information.
654 * Increment the retry count; if it exceeds the max, panic the partition.
655 *
656 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
657 * PCI: Device 23.90 ReadL Retry( 1)
658 * PCI: Device 23.90 ReadL Retry Successful(1)
659 */
660static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
661 int *retry, u64 ret)
662{
663 if (ret != 0) {
664 ++Pci_Error_Count;
665 (*retry)++;
666 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
667 TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn,
668 *retry, (int)ret);
669 /*
670 * Bump the retry and check for retry count exceeded.
672 * If exceeded, panic the system.
672 */
673 if (((*retry) > Pci_Retry_Max) &&
674 (Pci_Error_Flag > 0)) {
675 mf_display_src(0xB6000103);
676 panic_timeout = 0;
677 panic("PCI: Hardware I/O Error, SRC B6000103, "
678 "Automatic Reboot Disabled.\n");
679 }
680		return -1;	/* retry */
681 }
682 return 0;
683}
684
685/*
686 * Translate the I/O Address into a device node, bar, and bar offset.
687 * Note: Make sure the passed variables end up on the stack to avoid
688 * exposing them as device globals.
689 */
690static inline struct iSeries_Device_Node *xlate_iomm_address(
691 const volatile void __iomem *IoAddress,
692 u64 *dsaptr, u64 *BarOffsetPtr)
693{
694 unsigned long OrigIoAddr;
695 unsigned long BaseIoAddr;
696 unsigned long TableIndex;
697 struct iSeries_Device_Node *DevNode;
698
699 OrigIoAddr = (unsigned long __force)IoAddress;
700 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
701 return NULL;
702 BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
703 TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
704 DevNode = iomm_table[TableIndex];
705
706 if (DevNode != NULL) {
707 int barnum = iobar_table[TableIndex];
708 *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24);
709 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
710 } else
711 panic("PCI: Invalid PCI IoAddress detected!\n");
712 return DevNode;
713}
714
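A worked, standalone example of the address arithmetic in xlate_iomm_address() above: I/O addresses start at BASE_IO_MEMORY and each IOMM table entry covers one 4 MiB IOMM_TABLE_ENTRY_SIZE window (the offset value below is made up):

	#include <stdio.h>

	#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
	#define BASE_IO_MEMORY        0xE000000000000000UL

	int main(void)
	{
		unsigned long addr = BASE_IO_MEMORY + 0x00C12345UL;
		unsigned long off  = addr - BASE_IO_MEMORY;

		printf("table index %lu, bar offset 0x%lx\n",
		       off / IOMM_TABLE_ENTRY_SIZE,	/* -> 3       */
		       off % IOMM_TABLE_ENTRY_SIZE);	/* -> 0x12345 */
		return 0;
	}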
715/*
716 * Read MM I/O Instructions for the iSeries
717 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
718 * otherwise, data is returned in big-endian format.
719 *
720 * iSeries_Read_Byte = Read Byte ( 8 bit)
721 * iSeries_Read_Word = Read Word (16 bit)
722 * iSeries_Read_Long = Read Long (32 bit)
723 */
724u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
725{
726 u64 BarOffset;
727 u64 dsa;
728 int retry = 0;
729 struct HvCallPci_LoadReturn ret;
730 struct iSeries_Device_Node *DevNode =
731 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
732
733 if (DevNode == NULL) {
734 static unsigned long last_jiffies;
735 static int num_printed;
736
737 if ((jiffies - last_jiffies) > 60 * HZ) {
738 last_jiffies = jiffies;
739 num_printed = 0;
740 }
741 if (num_printed++ < 10)
742 printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
743 return 0xff;
744 }
745 do {
746 ++Pci_Io_Read_Count;
747 HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
748 } while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
749
750 return (u8)ret.value;
751}
752EXPORT_SYMBOL(iSeries_Read_Byte);
753
754u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
755{
756 u64 BarOffset;
757 u64 dsa;
758 int retry = 0;
759 struct HvCallPci_LoadReturn ret;
760 struct iSeries_Device_Node *DevNode =
761 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
762
763 if (DevNode == NULL) {
764 static unsigned long last_jiffies;
765 static int num_printed;
766
767 if ((jiffies - last_jiffies) > 60 * HZ) {
768 last_jiffies = jiffies;
769 num_printed = 0;
770 }
771 if (num_printed++ < 10)
772 printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
773 return 0xffff;
774 }
775 do {
776 ++Pci_Io_Read_Count;
777 HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
778 BarOffset, 0);
779 } while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
780
781 return swab16((u16)ret.value);
782}
783EXPORT_SYMBOL(iSeries_Read_Word);
784
785u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
786{
787 u64 BarOffset;
788 u64 dsa;
789 int retry = 0;
790 struct HvCallPci_LoadReturn ret;
791 struct iSeries_Device_Node *DevNode =
792 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
793
794 if (DevNode == NULL) {
795 static unsigned long last_jiffies;
796 static int num_printed;
797
798 if ((jiffies - last_jiffies) > 60 * HZ) {
799 last_jiffies = jiffies;
800 num_printed = 0;
801 }
802 if (num_printed++ < 10)
803 printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
804 return 0xffffffff;
805 }
806 do {
807 ++Pci_Io_Read_Count;
808 HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
809 BarOffset, 0);
810 } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
811
812 return swab32((u32)ret.value);
813}
814EXPORT_SYMBOL(iSeries_Read_Long);
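/*
 * Editorial note on the swabs above: the hypervisor returns load data in
 * big-endian register form while PCI data is little-endian, so the 16-
 * and 32-bit reads are byte-reversed before being returned; a single
 * byte needs no swap. For example:
 *
 *	swab16(0x1234)     == 0x3412
 *	swab32(0x12345678) == 0x78563412
 */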
815
816/*
817 * Write MM I/O Instructions for the iSeries
818 *
819 * iSeries_Write_Byte = Write Byte (8 bit)
 820 * iSeries_Write_Word = Write Word (16 bit)
 821 * iSeries_Write_Long = Write Long (32 bit)
822 */
823void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
824{
825 u64 BarOffset;
826 u64 dsa;
827 int retry = 0;
828 u64 rc;
829 struct iSeries_Device_Node *DevNode =
830 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
831
832 if (DevNode == NULL) {
833 static unsigned long last_jiffies;
834 static int num_printed;
835
836 if ((jiffies - last_jiffies) > 60 * HZ) {
837 last_jiffies = jiffies;
838 num_printed = 0;
839 }
840 if (num_printed++ < 10)
841 printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
842 return;
843 }
844 do {
845 ++Pci_Io_Write_Count;
846 rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
847 } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
848}
849EXPORT_SYMBOL(iSeries_Write_Byte);
850
851void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
852{
853 u64 BarOffset;
854 u64 dsa;
855 int retry = 0;
856 u64 rc;
857 struct iSeries_Device_Node *DevNode =
858 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
859
860 if (DevNode == NULL) {
861 static unsigned long last_jiffies;
862 static int num_printed;
863
864 if ((jiffies - last_jiffies) > 60 * HZ) {
865 last_jiffies = jiffies;
866 num_printed = 0;
867 }
868 if (num_printed++ < 10)
869 printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
870 return;
871 }
872 do {
873 ++Pci_Io_Write_Count;
874 rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
875 } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
876}
877EXPORT_SYMBOL(iSeries_Write_Word);
878
879void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
880{
881 u64 BarOffset;
882 u64 dsa;
883 int retry = 0;
884 u64 rc;
885 struct iSeries_Device_Node *DevNode =
886 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
887
888 if (DevNode == NULL) {
889 static unsigned long last_jiffies;
890 static int num_printed;
891
892 if ((jiffies - last_jiffies) > 60 * HZ) {
893 last_jiffies = jiffies;
894 num_printed = 0;
895 }
896 if (num_printed++ < 10)
897 printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
898 return;
899 }
900 do {
901 ++Pci_Io_Write_Count;
902 rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
903 } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
904}
905EXPORT_SYMBOL(iSeries_Write_Long);
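/*
 * Hypothetical usage sketch (the sketch_* names are not from the
 * original file): drivers do not call these exports directly; the
 * iSeries MMIO wrappers route the generic accessors to them along
 * these lines.
 */
static inline u32 sketch_readl(const volatile void __iomem *addr)
{
	return iSeries_Read_Long(addr);		/* already byte-swapped */
}

static inline void sketch_writel(u32 data, volatile void __iomem *addr)
{
	iSeries_Write_Long(data, addr);		/* swabbed to PCI byte order */
}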
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/ppc64/kernel/iSeries_proc.c
deleted file mode 100644
index 0fe3116eba29..000000000000
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * iSeries_proc.c
3 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/param.h> /* for HZ */
24#include <asm/paca.h>
25#include <asm/processor.h>
26#include <asm/time.h>
27#include <asm/lppaca.h>
28#include <asm/iSeries/ItLpQueue.h>
29#include <asm/iSeries/HvCallXm.h>
30#include <asm/iSeries/IoHriMainStore.h>
31#include <asm/iSeries/IoHriProcessorVpd.h>
32
33static int __init iseries_proc_create(void)
34{
35 struct proc_dir_entry *e = proc_mkdir("iSeries", 0);
36 if (!e)
37 return 1;
38
39 return 0;
40}
41core_initcall(iseries_proc_create);
42
43static unsigned long startTitan = 0;
44static unsigned long startTb = 0;
45
46static int proc_titantod_show(struct seq_file *m, void *v)
47{
48 unsigned long tb0, titan_tod;
49
50 tb0 = get_tb();
51 titan_tod = HvCallXm_loadTod();
52
 53	seq_printf(m, "Titan\n");
54 seq_printf(m, " time base = %016lx\n", tb0);
55 seq_printf(m, " titan tod = %016lx\n", titan_tod);
56 seq_printf(m, " xProcFreq = %016x\n",
57 xIoHriProcessorVpd[0].xProcFreq);
58 seq_printf(m, " xTimeBaseFreq = %016x\n",
59 xIoHriProcessorVpd[0].xTimeBaseFreq);
60 seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
61 seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
62
63 if (!startTitan) {
64 startTitan = titan_tod;
65 startTb = tb0;
66 } else {
67 unsigned long titan_usec = (titan_tod - startTitan) >> 12;
68 unsigned long tb_ticks = (tb0 - startTb);
69 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
71 unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec;
72 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
73 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
74 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
75 unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec;
76 unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec;
77
78 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
79 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
 80	seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
81 titan_jiff_rem_usec);
82 seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
83 tb_jiff_rem_usec);
84 seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
85 new_tb_ticks_per_jiffy);
86 }
87
88 return 0;
89}
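/*
 * Worked example of the conversion above (illustrative numbers): the
 * Titan TOD is treated as a 2^12 ticks-per-microsecond counter, hence
 * the ">> 12". With HZ == 100:
 *
 *	titan delta   = 0x2800000 ticks  ->  >> 12 = 10240 usec
 *	titan_jiffies = 10240 / (1000000/100)      = 1
 *	remainder     = 10240 - 1 * 10000          = 240 usec
 *
 * Comparing this against the timebase delta gives the suggested
 * new_tb_ticks_per_jiffy recalibration value printed above.
 */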
90
91static int proc_titantod_open(struct inode *inode, struct file *file)
92{
93 return single_open(file, proc_titantod_show, NULL);
94}
95
96static struct file_operations proc_titantod_operations = {
97 .open = proc_titantod_open,
98 .read = seq_read,
99 .llseek = seq_lseek,
100 .release = single_release,
101};
102
103static int __init iseries_proc_init(void)
104{
105 struct proc_dir_entry *e;
106
107 e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
108 if (e)
109 e->proc_fops = &proc_titantod_operations;
110
111 return 0;
112}
113__initcall(iseries_proc_init);
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
deleted file mode 100644
index 3ffefbbc6623..000000000000
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ /dev/null
@@ -1,977 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: iSeries_setup.c
6 *
7 * Description:
8 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
10 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
11 * <dan@net4x.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#undef DEBUG
20
21#include <linux/config.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/smp.h>
25#include <linux/param.h>
26#include <linux/string.h>
27#include <linux/initrd.h>
28#include <linux/seq_file.h>
29#include <linux/kdev_t.h>
30#include <linux/major.h>
31#include <linux/root_dev.h>
32
33#include <asm/processor.h>
34#include <asm/machdep.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/mmu_context.h>
39#include <asm/cputable.h>
40#include <asm/sections.h>
41#include <asm/iommu.h>
42#include <asm/firmware.h>
43
44#include <asm/time.h>
45#include "iSeries_setup.h"
46#include <asm/naca.h>
47#include <asm/paca.h>
48#include <asm/cache.h>
49#include <asm/sections.h>
50#include <asm/abs_addr.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/iSeries/HvLpConfig.h>
53#include <asm/iSeries/HvCallEvent.h>
54#include <asm/iSeries/HvCallSm.h>
55#include <asm/iSeries/HvCallXm.h>
56#include <asm/iSeries/ItLpQueue.h>
57#include <asm/iSeries/IoHriMainStore.h>
58#include <asm/iSeries/mf.h>
59#include <asm/iSeries/HvLpEvent.h>
60#include <asm/iSeries/iSeries_irq.h>
61#include <asm/iSeries/IoHriProcessorVpd.h>
62#include <asm/iSeries/ItVpdAreas.h>
63#include <asm/iSeries/LparMap.h>
64
65extern void hvlog(char *fmt, ...);
66
67#ifdef DEBUG
68#define DBG(fmt...) hvlog(fmt)
69#else
70#define DBG(fmt...)
71#endif
72
73/* Function Prototypes */
74extern void ppcdbg_initialize(void);
75
76static void build_iSeries_Memory_Map(void);
77static void setup_iSeries_cache_sizes(void);
78static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr);
79#ifdef CONFIG_PCI
80extern void iSeries_pci_final_fixup(void);
81#else
82static void iSeries_pci_final_fixup(void) { }
83#endif
84
85/* Global Variables */
86static unsigned long procFreqHz;
87static unsigned long procFreqMhz;
88static unsigned long procFreqMhzHundreths;
89
90static unsigned long tbFreqHz;
91static unsigned long tbFreqMhz;
92static unsigned long tbFreqMhzHundreths;
93
94int piranha_simulator;
95
96extern int rd_size; /* Defined in drivers/block/rd.c */
97extern unsigned long klimit;
98extern unsigned long embedded_sysmap_start;
99extern unsigned long embedded_sysmap_end;
100
101extern unsigned long iSeries_recal_tb;
102extern unsigned long iSeries_recal_titan;
103
104static int mf_initialized;
105
106struct MemoryBlock {
107 unsigned long absStart;
108 unsigned long absEnd;
109 unsigned long logicalStart;
110 unsigned long logicalEnd;
111};
112
113/*
114 * Process the main store vpd to determine where the holes in memory are
115 * and return the number of physical blocks and fill in the array of
116 * block data.
117 */
118static unsigned long iSeries_process_Condor_mainstore_vpd(
119 struct MemoryBlock *mb_array, unsigned long max_entries)
120{
121 unsigned long holeFirstChunk, holeSizeChunks;
122 unsigned long numMemoryBlocks = 1;
123 struct IoHriMainStoreSegment4 *msVpd =
124 (struct IoHriMainStoreSegment4 *)xMsVpd;
125 unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
126 unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
127 unsigned long holeSize = holeEnd - holeStart;
128
129 printk("Mainstore_VPD: Condor\n");
130 /*
131 * Determine if absolute memory has any
132 * holes so that we can interpret the
133 * access map we get back from the hypervisor
134 * correctly.
135 */
136 mb_array[0].logicalStart = 0;
137 mb_array[0].logicalEnd = 0x100000000;
138 mb_array[0].absStart = 0;
139 mb_array[0].absEnd = 0x100000000;
140
141 if (holeSize) {
142 numMemoryBlocks = 2;
143 holeStart = holeStart & 0x000fffffffffffff;
144 holeStart = addr_to_chunk(holeStart);
145 holeFirstChunk = holeStart;
146 holeSize = addr_to_chunk(holeSize);
147 holeSizeChunks = holeSize;
148 printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
149 holeFirstChunk, holeSizeChunks );
150 mb_array[0].logicalEnd = holeFirstChunk;
151 mb_array[0].absEnd = holeFirstChunk;
152 mb_array[1].logicalStart = holeFirstChunk;
153 mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
154 mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
155 mb_array[1].absEnd = 0x100000000;
156 }
157 return numMemoryBlocks;
158}
159
160#define MaxSegmentAreas 32
161#define MaxSegmentAdrRangeBlocks 128
162#define MaxAreaRangeBlocks 4
163
164static unsigned long iSeries_process_Regatta_mainstore_vpd(
165 struct MemoryBlock *mb_array, unsigned long max_entries)
166{
167 struct IoHriMainStoreSegment5 *msVpdP =
168 (struct IoHriMainStoreSegment5 *)xMsVpd;
169 unsigned long numSegmentBlocks = 0;
170 u32 existsBits = msVpdP->msAreaExists;
171 unsigned long area_num;
172
173 printk("Mainstore_VPD: Regatta\n");
174
175 for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
176 unsigned long numAreaBlocks;
177 struct IoHriMainStoreArea4 *currentArea;
178
179 if (existsBits & 0x80000000) {
180 unsigned long block_num;
181
182 currentArea = &msVpdP->msAreaArray[area_num];
183 numAreaBlocks = currentArea->numAdrRangeBlocks;
184 printk("ms_vpd: processing area %2ld blocks=%ld",
185 area_num, numAreaBlocks);
186 for (block_num = 0; block_num < numAreaBlocks;
187 ++block_num ) {
188 /* Process an address range block */
189 struct MemoryBlock tempBlock;
190 unsigned long i;
191
192 tempBlock.absStart =
193 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
194 tempBlock.absEnd =
195 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
196 tempBlock.logicalStart = 0;
197 tempBlock.logicalEnd = 0;
198 printk("\n block %ld absStart=%016lx absEnd=%016lx",
199 block_num, tempBlock.absStart,
200 tempBlock.absEnd);
201
202 for (i = 0; i < numSegmentBlocks; ++i) {
203 if (mb_array[i].absStart ==
204 tempBlock.absStart)
205 break;
206 }
207 if (i == numSegmentBlocks) {
208 if (numSegmentBlocks == max_entries)
209 panic("iSeries_process_mainstore_vpd: too many memory blocks");
210 mb_array[numSegmentBlocks] = tempBlock;
211 ++numSegmentBlocks;
212 } else
213 printk(" (duplicate)");
214 }
215 printk("\n");
216 }
217 existsBits <<= 1;
218 }
219 /* Now sort the blocks found into ascending sequence */
220 if (numSegmentBlocks > 1) {
221 unsigned long m, n;
222
223 for (m = 0; m < numSegmentBlocks - 1; ++m) {
224 for (n = numSegmentBlocks - 1; m < n; --n) {
225 if (mb_array[n].absStart <
226 mb_array[n-1].absStart) {
227 struct MemoryBlock tempBlock;
228
229 tempBlock = mb_array[n];
230 mb_array[n] = mb_array[n-1];
231 mb_array[n-1] = tempBlock;
232 }
233 }
234 }
235 }
236 /*
237 * Assign "logical" addresses to each block. These
238 * addresses correspond to the hypervisor "bitmap" space.
239 * Convert all addresses into units of 256K chunks.
240 */
241 {
242 unsigned long i, nextBitmapAddress;
243
244 printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
245 nextBitmapAddress = 0;
246 for (i = 0; i < numSegmentBlocks; ++i) {
247 unsigned long length = mb_array[i].absEnd -
248 mb_array[i].absStart;
249
250 mb_array[i].logicalStart = nextBitmapAddress;
251 mb_array[i].logicalEnd = nextBitmapAddress + length;
252 nextBitmapAddress += length;
253 printk(" Bitmap range: %016lx - %016lx\n"
254 " Absolute range: %016lx - %016lx\n",
255 mb_array[i].logicalStart,
256 mb_array[i].logicalEnd,
257 mb_array[i].absStart, mb_array[i].absEnd);
258 mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
259 0x000fffffffffffff);
260 mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
261 0x000fffffffffffff);
262 mb_array[i].logicalStart =
263 addr_to_chunk(mb_array[i].logicalStart);
264 mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
265 }
266 }
267
268 return numSegmentBlocks;
269}
270
271static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
272 unsigned long max_entries)
273{
274 unsigned long i;
275 unsigned long mem_blocks = 0;
276
277 if (cpu_has_feature(CPU_FTR_SLB))
278 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
279 max_entries);
280 else
281 mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
282 max_entries);
283
 284	printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
285 for (i = 0; i < mem_blocks; ++i) {
286 printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
287 " abs chunks %016lx - %016lx\n",
288 i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
289 mb_array[i].absStart, mb_array[i].absEnd);
290 }
291 return mem_blocks;
292}
293
294static void __init iSeries_get_cmdline(void)
295{
296 char *p, *q;
297
298 /* copy the command line parameter from the primary VSP */
 299	HvCallEvent_dmaToSp(cmd_line, 2 * 64 * 1024, 256,
300 HvLpDma_Direction_RemoteToLocal);
301
302 p = cmd_line;
303 q = cmd_line + 255;
304 while(p < q) {
305 if (!*p || *p == '\n')
306 break;
307 ++p;
308 }
309 *p = 0;
310}
311
312static void __init iSeries_init_early(void)
313{
314 extern unsigned long memory_limit;
315
316 DBG(" -> iSeries_init_early()\n");
317
318 ppc64_firmware_features = FW_FEATURE_ISERIES;
319
320 ppcdbg_initialize();
321
322#if defined(CONFIG_BLK_DEV_INITRD)
323 /*
324 * If the init RAM disk has been configured and there is
325 * a non-zero starting address for it, set it up
326 */
327 if (naca.xRamDisk) {
328 initrd_start = (unsigned long)__va(naca.xRamDisk);
329 initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
 330		initrd_below_start_ok = 1;	/* ramdisk in kernel space */
331 ROOT_DEV = Root_RAM0;
332 if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
333 rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
334 } else
335#endif /* CONFIG_BLK_DEV_INITRD */
336 {
337 /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
338 }
339
340 iSeries_recal_tb = get_tb();
341 iSeries_recal_titan = HvCallXm_loadTod();
342
343 /*
344 * Cache sizes must be initialized before hpte_init_iSeries is called
 345 * as the latter needs them for flush_icache_range()
346 */
347 setup_iSeries_cache_sizes();
348
349 /*
350 * Initialize the hash table management pointers
351 */
352 hpte_init_iSeries();
353
354 /*
355 * Initialize the DMA/TCE management
356 */
357 iommu_init_early_iSeries();
358
359 /*
360 * Initialize the table which translate Linux physical addresses to
361 * AS/400 absolute addresses
362 */
363 build_iSeries_Memory_Map();
364
365 iSeries_get_cmdline();
366
367 /* Save unparsed command line copy for /proc/cmdline */
368 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
369
370 /* Parse early parameters, in particular mem=x */
371 parse_early_param();
372
373 if (memory_limit) {
374 if (memory_limit < systemcfg->physicalMemorySize)
375 systemcfg->physicalMemorySize = memory_limit;
376 else {
377 printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
378 memory_limit = 0;
379 }
380 }
381
382 /* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
383 iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
384
385 lmb_init();
386 lmb_add(0, systemcfg->physicalMemorySize);
387 lmb_analyze();
388 lmb_reserve(0, __pa(klimit));
389
390 /* Initialize machine-dependency vectors */
391#ifdef CONFIG_SMP
392 smp_init_iSeries();
393#endif
394 if (itLpNaca.xPirEnvironMode == 0)
395 piranha_simulator = 1;
396
397 /* Associate Lp Event Queue 0 with processor 0 */
398 HvCallEvent_setLpEventQueueInterruptProc(0, 0);
399
400 mf_init();
401 mf_initialized = 1;
402 mb();
403
404 /* If we were passed an initrd, set the ROOT_DEV properly if the values
405 * look sensible. If not, clear initrd reference.
406 */
407#ifdef CONFIG_BLK_DEV_INITRD
408 if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
409 initrd_end > initrd_start)
410 ROOT_DEV = Root_RAM0;
411 else
412 initrd_start = initrd_end = 0;
413#endif /* CONFIG_BLK_DEV_INITRD */
414
415 DBG(" <- iSeries_init_early()\n");
416}
417
418struct mschunks_map mschunks_map = {
419 /* XXX We don't use these, but Piranha might need them. */
420 .chunk_size = MSCHUNKS_CHUNK_SIZE,
421 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
422 .chunk_mask = MSCHUNKS_OFFSET_MASK,
423};
424EXPORT_SYMBOL(mschunks_map);
425
426void mschunks_alloc(unsigned long num_chunks)
427{
428 klimit = _ALIGN(klimit, sizeof(u32));
429 mschunks_map.mapping = (u32 *)klimit;
430 klimit += num_chunks * sizeof(u32);
431 mschunks_map.num_chunks = num_chunks;
432}
433
434/*
435 * The iSeries may have very large memories ( > 128 GB ) and a partition
436 * may get memory in "chunks" that may be anywhere in the 2**52 real
437 * address space. The chunks are 256K in size. To map this to the
438 * memory model Linux expects, the AS/400 specific code builds a
439 * translation table to translate what Linux thinks are "physical"
440 * addresses to the actual real addresses. This allows us to make
441 * it appear to Linux that we have contiguous memory starting at
442 * physical address zero while in fact this could be far from the truth.
443 * To avoid confusion, I'll let the words physical and/or real address
444 * apply to the Linux addresses while I'll use "absolute address" to
445 * refer to the actual hardware real address.
446 *
447 * build_iSeries_Memory_Map gets information from the Hypervisor and
448 * looks at the Main Store VPD to determine the absolute addresses
449 * of the memory that has been assigned to our partition and builds
450 * a table used to translate Linux's physical addresses to these
451 * absolute addresses. Absolute addresses are needed when
452 * communicating with the hypervisor (e.g. to build HPT entries)
453 */
454
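/*
 * Minimal sketch of the translation this table enables (the real helpers
 * such as phys_to_abs() live in asm/abs_addr.h; this hypothetical
 * version just spells out the arithmetic for 256K chunks):
 */
static inline unsigned long sketch_phys_to_abs(unsigned long pa)
{
	/* Which 256K chunk does the Linux "physical" address fall in? */
	unsigned long chunk = addr_to_chunk(pa);

	/* Look up the absolute chunk and re-attach the offset within it. */
	return chunk_to_addr(mschunks_map.mapping[chunk]) +
		(pa & (MSCHUNKS_CHUNK_SIZE - 1));
}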
455static void __init build_iSeries_Memory_Map(void)
456{
457 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
458 u32 nextPhysChunk;
459 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
460 u32 num_ptegs;
 461	u32 totalChunks, moreChunks;
462 u32 currChunk, thisChunk, absChunk;
463 u32 currDword;
464 u32 chunkBit;
465 u64 map;
466 struct MemoryBlock mb[32];
467 unsigned long numMemoryBlocks, curBlock;
468
469 /* Chunk size on iSeries is 256K bytes */
470 totalChunks = (u32)HvLpConfig_getMsChunks();
471 mschunks_alloc(totalChunks);
472
473 /*
474 * Get absolute address of our load area
475 * and map it to physical address 0
476 * This guarantees that the loadarea ends up at physical 0
477 * otherwise, it might not be returned by PLIC as the first
478 * chunks
479 */
480
481 loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
482 loadAreaSize = itLpNaca.xLoadAreaChunks;
483
484 /*
485 * Only add the pages already mapped here.
 486 * Otherwise we might add the HPT pages.
 487 * The rest of the pages of the load area
 488 * aren't in the HPT yet and can still
 489 * be assigned an arbitrary physical address.
490 */
491 if ((loadAreaSize * 64) > HvPagesToMap)
492 loadAreaSize = HvPagesToMap / 64;
493
494 loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
495
496 /*
497 * TODO Do we need to do something if the HPT is in the 64MB load area?
498 * This would be required if the itLpNaca.xLoadAreaChunks includes
499 * the HPT size
500 */
501
502 printk("Mapping load area - physical addr = 0000000000000000\n"
503 " absolute addr = %016lx\n",
504 chunk_to_addr(loadAreaFirstChunk));
505 printk("Load area size %dK\n", loadAreaSize * 256);
506
507 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
508 mschunks_map.mapping[nextPhysChunk] =
509 loadAreaFirstChunk + nextPhysChunk;
510
511 /*
512 * Get absolute address of our HPT and remember it so
513 * we won't map it to any physical address
514 */
515 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
516 hptSizePages = (u32)HvCallHpt_getHptPages();
517 hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
518 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
519
520 printk("HPT absolute addr = %016lx, size = %dK\n",
521 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
522
523 /* Fill in the hashed page table hash mask */
524 num_ptegs = hptSizePages *
525 (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
526 htab_hash_mask = num_ptegs - 1;
527
528 /*
529 * The actual hashed page table is in the hypervisor,
530 * we have no direct access
531 */
532 htab_address = NULL;
533
534 /*
535 * Determine if absolute memory has any
536 * holes so that we can interpret the
537 * access map we get back from the hypervisor
538 * correctly.
539 */
540 numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
541
542 /*
543 * Process the main store access map from the hypervisor
544 * to build up our physical -> absolute translation table
545 */
546 curBlock = 0;
547 currChunk = 0;
548 currDword = 0;
549 moreChunks = totalChunks;
550
551 while (moreChunks) {
552 map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
553 currDword);
554 thisChunk = currChunk;
555 while (map) {
556 chunkBit = map >> 63;
557 map <<= 1;
558 if (chunkBit) {
559 --moreChunks;
560 while (thisChunk >= mb[curBlock].logicalEnd) {
561 ++curBlock;
562 if (curBlock >= numMemoryBlocks)
563 panic("out of memory blocks");
564 }
565 if (thisChunk < mb[curBlock].logicalStart)
566 panic("memory block error");
567
568 absChunk = mb[curBlock].absStart +
569 (thisChunk - mb[curBlock].logicalStart);
570 if (((absChunk < hptFirstChunk) ||
571 (absChunk > hptLastChunk)) &&
572 ((absChunk < loadAreaFirstChunk) ||
573 (absChunk > loadAreaLastChunk))) {
574 mschunks_map.mapping[nextPhysChunk] =
575 absChunk;
576 ++nextPhysChunk;
577 }
578 }
579 ++thisChunk;
580 }
581 ++currDword;
582 currChunk += 64;
583 }
584
585 /*
586 * main store size (in chunks) is
587 * totalChunks - hptSizeChunks
588 * which should be equal to
589 * nextPhysChunk
590 */
591 systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
592}
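/*
 * Worked example of the access-map scan above (illustrative values):
 * each word from HvCallSm_get64BitsOfAccessMap() describes 64 chunks,
 * most-significant bit first. If the word for currChunk == 128 is
 * 0xC000000000000000, chunks 128 and 129 belong to this partition;
 * each is mapped through mb[] to an absolute chunk and, unless it falls
 * inside the HPT or the load area, appended to mschunks_map.mapping[].
 */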
593
594/*
595 * Set up the variables that describe the cache line sizes
596 * for this machine.
597 */
598static void __init setup_iSeries_cache_sizes(void)
599{
600 unsigned int i, n;
601 unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
602
603 systemcfg->icache_size =
604 ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
605 systemcfg->icache_line_size =
606 ppc64_caches.iline_size =
607 xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
608 systemcfg->dcache_size =
609 ppc64_caches.dsize =
610 xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
611 systemcfg->dcache_line_size =
612 ppc64_caches.dline_size =
613 xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
614 ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
615 ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
616
617 i = ppc64_caches.iline_size;
618 n = 0;
619 while ((i = (i / 2)))
620 ++n;
621 ppc64_caches.log_iline_size = n;
622
623 i = ppc64_caches.dline_size;
624 n = 0;
625 while ((i = (i / 2)))
626 ++n;
627 ppc64_caches.log_dline_size = n;
628
629 printk("D-cache line size = %d\n",
630 (unsigned int)ppc64_caches.dline_size);
631 printk("I-cache line size = %d\n",
632 (unsigned int)ppc64_caches.iline_size);
633}
634
635/*
636 * Create a pte. Used during initialization only.
637 */
638static void iSeries_make_pte(unsigned long va, unsigned long pa,
639 int mode)
640{
641 hpte_t local_hpte, rhpte;
642 unsigned long hash, vpn;
643 long slot;
644
645 vpn = va >> PAGE_SHIFT;
646 hash = hpt_hash(vpn, 0);
647
648 local_hpte.r = pa | mode;
649 local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
650 | HPTE_V_BOLTED | HPTE_V_VALID;
651
652 slot = HvCallHpt_findValid(&rhpte, vpn);
653 if (slot < 0) {
654 /* Must find space in primary group */
655 panic("hash_page: hpte already exists\n");
656 }
657 HvCallHpt_addValidate(slot, 0, &local_hpte);
658}
659
660/*
661 * Bolt the kernel addr space into the HPT
662 */
663static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
664{
665 unsigned long pa;
666 unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
667 hpte_t hpte;
668
 669	for (pa = saddr; pa < eaddr; pa += PAGE_SIZE) {
670 unsigned long ea = (unsigned long)__va(pa);
671 unsigned long vsid = get_kernel_vsid(ea);
672 unsigned long va = (vsid << 28) | (pa & 0xfffffff);
673 unsigned long vpn = va >> PAGE_SHIFT;
674 unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
675
676 /* Make non-kernel text non-executable */
677 if (!in_kernel_text(ea))
678 mode_rw |= HW_NO_EXEC;
679
680 if (hpte.v & HPTE_V_VALID) {
681 /* HPTE exists, so just bolt it */
682 HvCallHpt_setSwBits(slot, 0x10, 0);
683 /* And make sure the pp bits are correct */
684 HvCallHpt_setPp(slot, PP_RWXX);
685 } else
686 /* No HPTE exists, so create a new bolted one */
687 iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
688 }
689}
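/*
 * Editorial note on the VA computation above: KERNELBASE is 256MB
 * segment aligned, so the low 28 bits of ea and pa agree, and
 * (vsid << 28) | (pa & 0xfffffff) is the effective address within the
 * kernel segment selected by get_kernel_vsid(ea).
 */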
690
691/*
692 * Document me.
693 */
694static void __init iSeries_setup_arch(void)
695{
696 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
697
698 /* Add an eye catcher and the systemcfg layout version number */
699 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
700 systemcfg->version.major = SYSTEMCFG_MAJOR;
701 systemcfg->version.minor = SYSTEMCFG_MINOR;
702
703 /* Setup the Lp Event Queue */
704 setup_hvlpevent_queue();
705
706 /* Compute processor frequency */
707 procFreqHz = ((1UL << 34) * 1000000) /
708 xIoHriProcessorVpd[procIx].xProcFreq;
709 procFreqMhz = procFreqHz / 1000000;
710 procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
711 ppc_proc_freq = procFreqHz;
712
713 /* Compute time base frequency */
714 tbFreqHz = ((1UL << 32) * 1000000) /
715 xIoHriProcessorVpd[procIx].xTimeBaseFreq;
716 tbFreqMhz = tbFreqHz / 1000000;
717 tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
718 ppc_tb_freq = tbFreqHz;
719
720 printk("Max logical processors = %d\n",
721 itVpdAreas.xSlicMaxLogicalProcs);
722 printk("Max physical processors = %d\n",
723 itVpdAreas.xSlicMaxPhysicalProcs);
724 printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
725 procFreqMhzHundreths);
726 printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
727 tbFreqMhzHundreths);
728 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
729 printk("Processor version = %x\n", systemcfg->processor);
730}
731
732static void iSeries_get_cpuinfo(struct seq_file *m)
733{
734 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
735}
736
737/*
738 * Document me.
739 * and Implement me.
740 */
741static int iSeries_get_irq(struct pt_regs *regs)
742{
743 /* -2 means ignore this interrupt */
744 return -2;
745}
746
747/*
748 * Document me.
749 */
750static void iSeries_restart(char *cmd)
751{
752 mf_reboot();
753}
754
755/*
756 * Document me.
757 */
758static void iSeries_power_off(void)
759{
760 mf_power_off();
761}
762
763/*
764 * Document me.
765 */
766static void iSeries_halt(void)
767{
768 mf_power_off();
769}
770
771/*
772 * void __init iSeries_calibrate_decr()
773 *
774 * Description:
775 * This routine retrieves the internal processor frequency from the VPD,
776 * and sets up the kernel timer decrementer based on that value.
777 *
778 */
779static void __init iSeries_calibrate_decr(void)
780{
781 unsigned long cyclesPerUsec;
782 struct div_result divres;
783
784 /* Compute decrementer (and TB) frequency in cycles/sec */
785 cyclesPerUsec = ppc_tb_freq / 1000000;
786
787 /*
788 * Set the amount to refresh the decrementer by. This
789 * is the number of decrementer ticks it takes for
790 * 1/HZ seconds.
791 */
792 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
793
794#if 0
795 /* TEST CODE FOR ADJTIME */
796 tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
797 /* END OF TEST CODE */
798#endif
799
800 /*
801 * tb_ticks_per_sec = freq; would give better accuracy
802 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
803 * that jiffies (and xtime) will match the time returned
804 * by do_gettimeofday.
805 */
806 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
807 tb_ticks_per_usec = cyclesPerUsec;
808 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
809 div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
810 tb_to_xs = divres.result_low;
811 setup_default_decr();
812}
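/*
 * Editorial note on the scaling above: div128_by_32(1024*1024, 0, t, &d)
 * divides the 128-bit value (2^20 << 64) by t, so tb_to_xs becomes the
 * 0.64 fixed-point factor (2^20 * 2^64) / tb_ticks_per_sec, and
 * (ticks * tb_to_xs) >> 64 yields time in 2^-20-second "xsec" units.
 * For a hypothetical 188 MHz timebase with HZ == 100:
 *
 *	tb_ticks_per_jiffy = 188000000 / 100 = 1880000
 *	tb_ticks_per_usec  = 188
 */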
813
814static void __init iSeries_progress(char * st, unsigned short code)
815{
816 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
817 if (!piranha_simulator && mf_initialized) {
818 if (code != 0xffff)
819 mf_display_progress(code);
820 else
821 mf_clear_src();
822 }
823}
824
825static void __init iSeries_fixup_klimit(void)
826{
827 /*
828 * Change klimit to take into account any ram disk
829 * that may be included
830 */
831 if (naca.xRamDisk)
832 klimit = KERNELBASE + (u64)naca.xRamDisk +
833 (naca.xRamDiskSize * PAGE_SIZE);
834 else {
835 /*
836 * No ram disk was included - check and see if there
837 * was an embedded system map. Change klimit to take
838 * into account any embedded system map
839 */
840 if (embedded_sysmap_end)
841 klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
842 0xfffffffffffff000);
843 }
844}
845
846static int __init iSeries_src_init(void)
847{
848 /* clear the progress line */
849 ppc_md.progress(" ", 0xffff);
850 return 0;
851}
852
853late_initcall(iSeries_src_init);
854
855static inline void process_iSeries_events(void)
856{
857 asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
858}
859
860static void yield_shared_processor(void)
861{
862 unsigned long tb;
863
864 HvCall_setEnabledInterrupts(HvCall_MaskIPI |
865 HvCall_MaskLpEvent |
866 HvCall_MaskLpProd |
867 HvCall_MaskTimeout);
868
869 tb = get_tb();
870 /* Compute future tb value when yield should expire */
871 HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
872
873 /*
874 * The decrementer stops during the yield. Force a fake decrementer
875 * here and let the timer_interrupt code sort out the actual time.
876 */
877 get_paca()->lppaca.int_dword.fields.decr_int = 1;
878 process_iSeries_events();
879}
880
881static int iseries_shared_idle(void)
882{
883 while (1) {
884 while (!need_resched() && !hvlpevent_is_pending()) {
885 local_irq_disable();
886 ppc64_runlatch_off();
887
888 /* Recheck with irqs off */
889 if (!need_resched() && !hvlpevent_is_pending())
890 yield_shared_processor();
891
892 HMT_medium();
893 local_irq_enable();
894 }
895
896 ppc64_runlatch_on();
897
898 if (hvlpevent_is_pending())
899 process_iSeries_events();
900
901 schedule();
902 }
903
904 return 0;
905}
906
907static int iseries_dedicated_idle(void)
908{
909 long oldval;
910
911 while (1) {
912 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
913
914 if (!oldval) {
915 set_thread_flag(TIF_POLLING_NRFLAG);
916
917 while (!need_resched()) {
918 ppc64_runlatch_off();
919 HMT_low();
920
921 if (hvlpevent_is_pending()) {
922 HMT_medium();
923 ppc64_runlatch_on();
924 process_iSeries_events();
925 }
926 }
927
928 HMT_medium();
929 clear_thread_flag(TIF_POLLING_NRFLAG);
930 } else {
931 set_need_resched();
932 }
933
934 ppc64_runlatch_on();
935 schedule();
936 }
937
938 return 0;
939}
940
941#ifndef CONFIG_PCI
942void __init iSeries_init_IRQ(void) { }
943#endif
944
945void __init iSeries_early_setup(void)
946{
947 iSeries_fixup_klimit();
948
949 ppc_md.setup_arch = iSeries_setup_arch;
950 ppc_md.get_cpuinfo = iSeries_get_cpuinfo;
951 ppc_md.init_IRQ = iSeries_init_IRQ;
952 ppc_md.get_irq = iSeries_get_irq;
 953	ppc_md.init_early = iSeries_init_early;
954
955 ppc_md.pcibios_fixup = iSeries_pci_final_fixup;
956
957 ppc_md.restart = iSeries_restart;
958 ppc_md.power_off = iSeries_power_off;
959 ppc_md.halt = iSeries_halt;
960
961 ppc_md.get_boot_time = iSeries_get_boot_time;
962 ppc_md.set_rtc_time = iSeries_set_rtc_time;
963 ppc_md.get_rtc_time = iSeries_get_rtc_time;
964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
965 ppc_md.progress = iSeries_progress;
966
967 /* XXX Implement enable_pmcs for iSeries */
968
969 if (get_paca()->lppaca.shared_proc) {
970 ppc_md.idle_loop = iseries_shared_idle;
971 printk(KERN_INFO "Using shared processor idle loop\n");
972 } else {
973 ppc_md.idle_loop = iseries_dedicated_idle;
974 printk(KERN_INFO "Using dedicated idle loop\n");
975 }
976}
977
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/ppc64/kernel/iSeries_setup.h
deleted file mode 100644
index c6eb29a245ac..000000000000
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
 5 * Module name: iSeries_setup.h
6 *
7 * Description:
8 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
10 * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
11 * <dan@netx4.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#ifndef __ISERIES_SETUP_H__
20#define __ISERIES_SETUP_H__
21
22extern void iSeries_get_boot_time(struct rtc_time *tm);
23extern int iSeries_set_rtc_time(struct rtc_time *tm);
24extern void iSeries_get_rtc_time(struct rtc_time *tm);
25
26#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/ppc64/kernel/iSeries_smp.c
deleted file mode 100644
index f74386e31638..000000000000
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * SMP support for iSeries machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/smp_lock.h>
23#include <linux/interrupt.h>
24#include <linux/kernel_stat.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/spinlock.h>
28#include <linux/cache.h>
29#include <linux/err.h>
30#include <linux/sysdev.h>
31#include <linux/cpu.h>
32
33#include <asm/ptrace.h>
34#include <asm/atomic.h>
35#include <asm/irq.h>
36#include <asm/page.h>
37#include <asm/pgtable.h>
38#include <asm/io.h>
39#include <asm/smp.h>
40#include <asm/paca.h>
41#include <asm/iSeries/HvCall.h>
42#include <asm/time.h>
43#include <asm/ppcdebug.h>
44#include <asm/machdep.h>
45#include <asm/cputable.h>
46#include <asm/system.h>
47
48static unsigned long iSeries_smp_message[NR_CPUS];
49
50void iSeries_smp_message_recv( struct pt_regs * regs )
51{
52 int cpu = smp_processor_id();
53 int msg;
54
55 if ( num_online_cpus() < 2 )
56 return;
57
58 for ( msg = 0; msg < 4; ++msg )
59 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
60 smp_message_recv( msg, regs );
61}
62
63static inline void smp_iSeries_do_message(int cpu, int msg)
64{
65 set_bit(msg, &iSeries_smp_message[cpu]);
66 HvCall_sendIPI(&(paca[cpu]));
67}
68
69static void smp_iSeries_message_pass(int target, int msg)
70{
71 int i;
72
73 if (target < NR_CPUS)
74 smp_iSeries_do_message(target, msg);
75 else {
76 for_each_online_cpu(i) {
77 if (target == MSG_ALL_BUT_SELF
78 && i == smp_processor_id())
79 continue;
80 smp_iSeries_do_message(i, msg);
81 }
82 }
83}
84
85static int smp_iSeries_numProcs(void)
86{
87 unsigned np, i;
88
89 np = 0;
90 for (i=0; i < NR_CPUS; ++i) {
91 if (paca[i].lppaca.dyn_proc_status < 2) {
92 cpu_set(i, cpu_possible_map);
93 cpu_set(i, cpu_present_map);
94 cpu_set(i, cpu_sibling_map[i]);
95 ++np;
96 }
97 }
98 return np;
99}
100
101static int smp_iSeries_probe(void)
102{
103 unsigned i;
104 unsigned np = 0;
105
106 for (i=0; i < NR_CPUS; ++i) {
107 if (paca[i].lppaca.dyn_proc_status < 2) {
108 /*paca[i].active = 1;*/
109 ++np;
110 }
111 }
112
113 return np;
114}
115
116static void smp_iSeries_kick_cpu(int nr)
117{
118 BUG_ON(nr < 0 || nr >= NR_CPUS);
119
120 /* Verify that our partition has a processor nr */
121 if (paca[nr].lppaca.dyn_proc_status >= 2)
122 return;
123
124 /* The processor is currently spinning, waiting
 125	 * for the cpu_start field to become non-zero.
126 * After we set cpu_start, the processor will
127 * continue on to secondary_start in iSeries_head.S
128 */
129 paca[nr].cpu_start = 1;
130}
131
132static void __devinit smp_iSeries_setup_cpu(int nr)
133{
134}
135
136static struct smp_ops_t iSeries_smp_ops = {
137 .message_pass = smp_iSeries_message_pass,
138 .probe = smp_iSeries_probe,
139 .kick_cpu = smp_iSeries_kick_cpu,
140 .setup_cpu = smp_iSeries_setup_cpu,
141};
142
143/* This is called very early. */
144void __init smp_init_iSeries(void)
145{
146 smp_ops = &iSeries_smp_ops;
147 systemcfg->processorCount = smp_iSeries_numProcs();
148}
149
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/ppc64/kernel/iSeries_vio.c
deleted file mode 100644
index 6b754b0c8344..000000000000
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * IBM PowerPC iSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2005 Stephen Rothwell, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/types.h>
12#include <linux/device.h>
13#include <linux/init.h>
14
15#include <asm/vio.h>
16#include <asm/iommu.h>
17#include <asm/abs_addr.h>
18#include <asm/page.h>
19#include <asm/iSeries/vio.h>
20#include <asm/iSeries/HvTypes.h>
21#include <asm/iSeries/HvLpConfig.h>
22#include <asm/iSeries/HvCallXm.h>
23
24struct device *iSeries_vio_dev = &vio_bus_device.dev;
25EXPORT_SYMBOL(iSeries_vio_dev);
26
27static struct iommu_table veth_iommu_table;
28static struct iommu_table vio_iommu_table;
29
30static void __init iommu_vio_init(void)
31{
32 struct iommu_table *t;
33 struct iommu_table_cb cb;
34 unsigned long cbp;
35 unsigned long itc_entries;
36
37 cb.itc_busno = 255; /* Bus 255 is the virtual bus */
38 cb.itc_virtbus = 0xff; /* Ask for virtual bus */
39
40 cbp = virt_to_abs(&cb);
41 HvCallXm_getTceTableParms(cbp);
42
43 itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
44 veth_iommu_table.it_size = itc_entries / 2;
45 veth_iommu_table.it_busno = cb.itc_busno;
46 veth_iommu_table.it_offset = cb.itc_offset;
47 veth_iommu_table.it_index = cb.itc_index;
48 veth_iommu_table.it_type = TCE_VB;
49 veth_iommu_table.it_blocksize = 1;
50
51 t = iommu_init_table(&veth_iommu_table);
52
53 if (!t)
54 printk("Virtual Bus VETH TCE table failed.\n");
55
56 vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
57 vio_iommu_table.it_busno = cb.itc_busno;
58 vio_iommu_table.it_offset = cb.itc_offset +
59 veth_iommu_table.it_size;
60 vio_iommu_table.it_index = cb.itc_index;
61 vio_iommu_table.it_type = TCE_VB;
62 vio_iommu_table.it_blocksize = 1;
63
64 t = iommu_init_table(&vio_iommu_table);
65
66 if (!t)
67 printk("Virtual Bus VIO TCE table failed.\n");
68}
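/*
 * Worked example of the split above (hypothetical size): with
 * itc_entries == 0x1000, the virtual-ethernet table takes the first
 * 0x800 entries at cb.itc_offset and the general VIO table takes the
 * remaining 0x800 starting at cb.itc_offset + 0x800, so the two tables
 * share one hypervisor-provided TCE range without overlapping.
 */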
69
70/**
71 * vio_register_device_iseries: - Register a new iSeries vio device.
 72 * @type: device type string; @unit_num: unit number of the device to register.
73 */
74static struct vio_dev *__init vio_register_device_iseries(char *type,
75 uint32_t unit_num)
76{
77 struct vio_dev *viodev;
78
79 /* allocate a vio_dev for this device */
80 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
81 if (!viodev)
82 return NULL;
83 memset(viodev, 0, sizeof(struct vio_dev));
84
85 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
86
87 viodev->name = viodev->dev.bus_id;
88 viodev->type = type;
89 viodev->unit_address = unit_num;
90 viodev->iommu_table = &vio_iommu_table;
91 if (vio_register_device(viodev) == NULL) {
92 kfree(viodev);
93 return NULL;
94 }
95 return viodev;
96}
97
98void __init probe_bus_iseries(void)
99{
100 HvLpIndexMap vlan_map;
101 struct vio_dev *viodev;
102 int i;
103
104 /* there is only one of each of these */
105 vio_register_device_iseries("viocons", 0);
106 vio_register_device_iseries("vscsi", 0);
107
108 vlan_map = HvLpConfig_getVirtualLanIndexMap();
109 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
110 if ((vlan_map & (0x8000 >> i)) == 0)
111 continue;
112 viodev = vio_register_device_iseries("vlan", i);
 113		/* veth is special and has its own iommu_table */
114 viodev->iommu_table = &veth_iommu_table;
115 }
116 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
117 vio_register_device_iseries("viodasd", i);
118 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
119 vio_register_device_iseries("viocd", i);
120 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
121 vio_register_device_iseries("viotape", i);
122}
123
124/**
 125 * vio_match_device_iseries: - Tell if an iSeries VIO device matches a
126 * vio_device_id
127 */
128static int vio_match_device_iseries(const struct vio_device_id *id,
129 const struct vio_dev *dev)
130{
131 return strncmp(dev->type, id->type, strlen(id->type)) == 0;
132}
133
134static struct vio_bus_ops vio_bus_ops_iseries = {
135 .match = vio_match_device_iseries,
136};
137
138/**
139 * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus
140 */
141static int __init vio_bus_init_iseries(void)
142{
143 int err;
144
145 err = vio_bus_init(&vio_bus_ops_iseries);
146 if (err == 0) {
147 iommu_vio_init();
148 vio_bus_device.iommu_table = &vio_iommu_table;
149 iSeries_vio_dev = &vio_bus_device.dev;
150 probe_bus_iseries();
151 }
152 return err;
153}
154
155__initcall(vio_bus_init_iseries);
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 954395d42636..8abd2ad92832 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -31,7 +31,7 @@
31 31
32extern void power4_idle(void); 32extern void power4_idle(void);
33 33
34int default_idle(void) 34void default_idle(void)
35{ 35{
36 long oldval; 36 long oldval;
37 unsigned int cpu = smp_processor_id(); 37 unsigned int cpu = smp_processor_id();
@@ -64,11 +64,9 @@ int default_idle(void)
64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
65 cpu_die(); 65 cpu_die();
66 } 66 }
67
68 return 0;
69} 67}
70 68
71int native_idle(void) 69void native_idle(void)
72{ 70{
73 while (1) { 71 while (1) {
74 ppc64_runlatch_off(); 72 ppc64_runlatch_off();
@@ -85,8 +83,6 @@ int native_idle(void)
85 system_state == SYSTEM_RUNNING) 83 system_state == SYSTEM_RUNNING)
86 cpu_die(); 84 cpu_die();
87 } 85 }
88
89 return 0;
90} 86}
91 87
92void cpu_idle(void) 88void cpu_idle(void)
diff --git a/arch/ppc64/kernel/idle_power4.S b/arch/ppc64/kernel/idle_power4.S
deleted file mode 100644
index ca02afe2a795..000000000000
--- a/arch/ppc64/kernel/idle_power4.S
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
 5 * Warning! This code assumes that if your machine has a 750fx
 6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 7 * If this is not the case, some additional changes will have to
 8 * be done to check a runtime var (a bit like powersave-nap).
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/processor.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Here is the power_save_6xx function. This could eventually be
31 * split into several functions & changing the function pointer
32 * depending on the various features.
33 */
34_GLOBAL(power4_idle)
35BEGIN_FTR_SECTION
36 blr
37END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
38 /* We must dynamically check for the NAP feature as it
39 * can be cleared by CPU init after the fixups are done
40 */
41 LOADBASE(r3,cur_cpu_spec)
42 ld r4,cur_cpu_spec@l(r3)
43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr
46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap)
48 lwz r4,powersave_nap@l(r3)
49 cmpwi 0,r4,0
50 beqlr
51
52 /* Clear MSR:EE */
53 mfmsr r7
54 li r4,0
55 ori r4,r4,MSR_EE
56 andc r0,r7,r4
57 mtmsrd r0
58
59 /* Check current_thread_info()->flags */
60 clrrdi r4,r1,THREAD_SHIFT
61 ld r4,TI_FLAGS(r4)
62 andi. r0,r4,_TIF_NEED_RESCHED
63 beq 1f
64 mtmsrd r7 /* out of line this ? */
65 blr
661:
67 /* Go to NAP now */
68BEGIN_FTR_SECTION
69 DSSALL
70 sync
71END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
72 oris r7,r7,MSR_POW@h
73 sync
74 isync
75 mtmsrd r7
76 isync
77 sync
78 blr
79
diff --git a/arch/ppc64/kernel/init_task.c b/arch/ppc64/kernel/init_task.c
deleted file mode 100644
index 941043ae040f..000000000000
--- a/arch/ppc64/kernel/init_task.c
+++ /dev/null
@@ -1,36 +0,0 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init.h>
5#include <linux/init_task.h>
6#include <linux/fs.h>
7#include <linux/mqueue.h>
8#include <asm/uaccess.h>
9
10static struct fs_struct init_fs = INIT_FS;
11static struct files_struct init_files = INIT_FILES;
12static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
13static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
14struct mm_struct init_mm = INIT_MM(init_mm);
15
16EXPORT_SYMBOL(init_mm);
17
18/*
19 * Initial thread structure.
20 *
21 * We need to make sure that this is 16384-byte aligned due to the
22 * way process stacks are handled. This is done by having a special
23 * "init_task" linker map entry..
24 */
25union thread_union init_thread_union
26 __attribute__((__section__(".data.init_task"))) =
27 { INIT_THREAD_INFO(init_task) };
28
29/*
30 * Initial task structure.
31 *
32 * All other task structs will be allocated on slabs in fork.c
33 */
34struct task_struct init_task = INIT_TASK(init_task);
35
36EXPORT_SYMBOL(init_task);
diff --git a/arch/ppc64/kernel/ioctl32.c b/arch/ppc64/kernel/ioctl32.c
index a8005db23ec5..ba4a899045c2 100644
--- a/arch/ppc64/kernel/ioctl32.c
+++ b/arch/ppc64/kernel/ioctl32.c
@@ -39,9 +39,7 @@ IOCTL_TABLE_START
39#include <linux/compat_ioctl.h> 39#include <linux/compat_ioctl.h>
40#define DECLARES 40#define DECLARES
41#include "compat_ioctl.c" 41#include "compat_ioctl.c"
42COMPATIBLE_IOCTL(TIOCSTART) 42
43COMPATIBLE_IOCTL(TIOCSTOP)
44COMPATIBLE_IOCTL(TIOCSLTC)
45/* Little p (/dev/rtc, /dev/envctrl, etc.) */ 43/* Little p (/dev/rtc, /dev/envctrl, etc.) */
46COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ 44COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
47COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ 45COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 9c6facc24f70..ed876a5178ae 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -395,7 +395,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
395 if (post_kprobe_handler(args->regs)) 395 if (post_kprobe_handler(args->regs))
396 ret = NOTIFY_STOP; 396 ret = NOTIFY_STOP;
397 break; 397 break;
398 case DIE_GPF:
399 case DIE_PAGE_FAULT: 398 case DIE_PAGE_FAULT:
400 if (kprobe_running() && 399 if (kprobe_running() &&
401 kprobe_fault_handler(args->regs, args->trapnr)) 400 kprobe_fault_handler(args->regs, args->trapnr))
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
deleted file mode 100644
index 5adaca2ddc9d..000000000000
--- a/arch/ppc64/kernel/lmb.c
+++ /dev/null
@@ -1,299 +0,0 @@
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/bitops.h>
17#include <asm/types.h>
18#include <asm/page.h>
19#include <asm/prom.h>
20#include <asm/lmb.h>
21#include <asm/abs_addr.h>
22
23struct lmb lmb;
24
25#undef DEBUG
26
27void lmb_dump_all(void)
28{
29#ifdef DEBUG
30 unsigned long i;
31
32 udbg_printf("lmb_dump_all:\n");
33 udbg_printf(" memory.cnt = 0x%lx\n",
34 lmb.memory.cnt);
35 udbg_printf(" memory.size = 0x%lx\n",
36 lmb.memory.size);
37 for (i=0; i < lmb.memory.cnt ;i++) {
38 udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
39 i, lmb.memory.region[i].base);
40 udbg_printf(" .size = 0x%lx\n",
41 lmb.memory.region[i].size);
42 }
43
44 udbg_printf("\n reserved.cnt = 0x%lx\n",
45 lmb.reserved.cnt);
46 udbg_printf(" reserved.size = 0x%lx\n",
47 lmb.reserved.size);
48 for (i=0; i < lmb.reserved.cnt ;i++) {
49 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
50 i, lmb.reserved.region[i].base);
51 udbg_printf(" .size = 0x%lx\n",
52 lmb.reserved.region[i].size);
53 }
54#endif /* DEBUG */
55}
56
57static unsigned long __init
58lmb_addrs_overlap(unsigned long base1, unsigned long size1,
59 unsigned long base2, unsigned long size2)
60{
61 return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
62}
63
64static long __init
65lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
66 unsigned long base2, unsigned long size2)
67{
68 if (base2 == base1 + size1)
69 return 1;
70 else if (base1 == base2 + size2)
71 return -1;
72
73 return 0;
74}
75
76static long __init
77lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
78{
79 unsigned long base1 = rgn->region[r1].base;
80 unsigned long size1 = rgn->region[r1].size;
81 unsigned long base2 = rgn->region[r2].base;
82 unsigned long size2 = rgn->region[r2].size;
83
84 return lmb_addrs_adjacent(base1, size1, base2, size2);
85}
86
87/* Assumption: base addr of region 1 < base addr of region 2 */
88static void __init
89lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
90{
91 unsigned long i;
92
93 rgn->region[r1].size += rgn->region[r2].size;
94 for (i=r2; i < rgn->cnt-1; i++) {
95 rgn->region[i].base = rgn->region[i+1].base;
96 rgn->region[i].size = rgn->region[i+1].size;
97 }
98 rgn->cnt--;
99}
100
101/* This routine is called with relocation disabled. */
102void __init
103lmb_init(void)
104{
105 /* Create a dummy zero size LMB which will get coalesced away later.
106 * This simplifies the lmb_add() code below...
107 */
108 lmb.memory.region[0].base = 0;
109 lmb.memory.region[0].size = 0;
110 lmb.memory.cnt = 1;
111
112 /* Ditto. */
113 lmb.reserved.region[0].base = 0;
114 lmb.reserved.region[0].size = 0;
115 lmb.reserved.cnt = 1;
116}
117
118/* This routine is called with relocation disabled. */
119void __init
120lmb_analyze(void)
121{
122 int i;
123
124 lmb.memory.size = 0;
125
126 for (i = 0; i < lmb.memory.cnt; i++)
127 lmb.memory.size += lmb.memory.region[i].size;
128}
129
130/* This routine is called with relocation disabled. */
131static long __init
132lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
133{
134 unsigned long i, coalesced = 0;
135 long adjacent;
136
137 /* First try and coalesce this LMB with another. */
138 for (i=0; i < rgn->cnt; i++) {
139 unsigned long rgnbase = rgn->region[i].base;
140 unsigned long rgnsize = rgn->region[i].size;
141
142 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
143 if ( adjacent > 0 ) {
144 rgn->region[i].base -= size;
145 rgn->region[i].size += size;
146 coalesced++;
147 break;
148 }
149 else if ( adjacent < 0 ) {
150 rgn->region[i].size += size;
151 coalesced++;
152 break;
153 }
154 }
155
156 if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) {
157 lmb_coalesce_regions(rgn, i, i+1);
158 coalesced++;
159 }
160
161 if ( coalesced ) {
162 return coalesced;
163 } else if ( rgn->cnt >= MAX_LMB_REGIONS ) {
164 return -1;
165 }
166
167 /* Couldn't coalesce the LMB, so add it to the sorted table. */
168 for (i=rgn->cnt-1; i >= 0; i--) {
169 if (base < rgn->region[i].base) {
170 rgn->region[i+1].base = rgn->region[i].base;
171 rgn->region[i+1].size = rgn->region[i].size;
172 } else {
173 rgn->region[i+1].base = base;
174 rgn->region[i+1].size = size;
175 break;
176 }
177 }
178 rgn->cnt++;
179
180 return 0;
181}
182
183/* This routine is called with relocation disabled. */
184long __init
185lmb_add(unsigned long base, unsigned long size)
186{
187 struct lmb_region *_rgn = &(lmb.memory);
188
189 /* On pSeries LPAR systems, the first LMB is our RMO region. */
190 if ( base == 0 )
191 lmb.rmo_size = size;
192
193 return lmb_add_region(_rgn, base, size);
194
195}
196
197long __init
198lmb_reserve(unsigned long base, unsigned long size)
199{
200 struct lmb_region *_rgn = &(lmb.reserved);
201
202 return lmb_add_region(_rgn, base, size);
203}
204
205long __init
206lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
207{
208 unsigned long i;
209
210 for (i=0; i < rgn->cnt; i++) {
211 unsigned long rgnbase = rgn->region[i].base;
212 unsigned long rgnsize = rgn->region[i].size;
213 if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) {
214 break;
215 }
216 }
217
218 return (i < rgn->cnt) ? i : -1;
219}
220
221unsigned long __init
222lmb_alloc(unsigned long size, unsigned long align)
223{
224 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
225}
226
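/*
 * Scan the memory regions from the top down, proposing the highest aligned
 * base that fits below max_addr; whenever a candidate overlaps a reserved
 * region, retry just below that reservation. On success the block is added
 * to the reserved list and its base is returned; on failure, returns 0.
 */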
227unsigned long __init
228lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
229{
230 long i, j;
231 unsigned long base = 0;
232
233 for (i=lmb.memory.cnt-1; i >= 0; i--) {
234 unsigned long lmbbase = lmb.memory.region[i].base;
235 unsigned long lmbsize = lmb.memory.region[i].size;
236
237 if ( max_addr == LMB_ALLOC_ANYWHERE )
238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
239 else if ( lmbbase < max_addr )
240 base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align);
241 else
242 continue;
243
244 while ( (lmbbase <= base) &&
245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
247 }
248
249 if ( (base != 0) && (lmbbase <= base) )
250 break;
251 }
252
253 if ( i < 0 )
254 return 0;
255
256 lmb_add_region(&lmb.reserved, base, size);
257
258 return base;
259}
260
261/* You must call lmb_analyze() before this. */
262unsigned long __init
263lmb_phys_mem_size(void)
264{
265 return lmb.memory.size;
266}
267
268unsigned long __init
269lmb_end_of_DRAM(void)
270{
271 int idx = lmb.memory.cnt - 1;
272
273 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
274}
275
276/*
277 * Truncate the lmb list to memory_limit if it is set.
278 * You must call lmb_analyze() after this.
279 */
280void __init lmb_enforce_memory_limit(void)
281{
282 extern unsigned long memory_limit;
283 unsigned long i, limit;
284
285 if (! memory_limit)
286 return;
287
288 limit = memory_limit;
289 for (i = 0; i < lmb.memory.cnt; i++) {
290 if (limit > lmb.memory.region[i].size) {
291 limit -= lmb.memory.region[i].size;
292 continue;
293 }
294
295 lmb.memory.region[i].size = limit;
296 lmb.memory.cnt = i + 1;
297 break;
298 }
299}
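/*
 * A minimal, hypothetical illustration of how early boot code might drive
 * this allocator: example_early_mem_setup() and the sizes used here are
 * made up, but the prototypes match the functions above.
 */
static void __init example_early_mem_setup(void)
{
	unsigned long tbl;

	lmb_init();				/* install the dummy zero-size regions */
	lmb_add(0, 0x08000000);			/* 128MB at 0; base 0 also becomes the RMO */
	lmb_add(0x10000000, 0x08000000);	/* a second, discontiguous block */
	lmb_analyze();				/* recompute lmb.memory.size */

	lmb_reserve(0, 0x00100000);		/* protect the kernel's first 1MB */
	tbl = lmb_alloc(0x10000, 0x1000);	/* 64kB, 4kB-aligned, carved from the top */
	if (tbl == 0)
		udbg_printf("lmb_alloc failed\n");
}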
diff --git a/arch/ppc64/kernel/lparmap.c b/arch/ppc64/kernel/lparmap.c
deleted file mode 100644
index b81de286df5e..000000000000
--- a/arch/ppc64/kernel/lparmap.c
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright (C) 2005 Stephen Rothwell IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <asm/mmu.h>
10#include <asm/page.h>
11#include <asm/iSeries/LparMap.h>
12
13const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
14 .xNumberEsids = HvEsidsToMap,
15 .xNumberRanges = HvRangesToMap,
16 .xSegmentTableOffs = STAB0_PAGE,
17
18 .xEsids = {
19 { .xKernelEsid = GET_ESID(KERNELBASE),
20 .xKernelVsid = KERNEL_VSID(KERNELBASE), },
21 { .xKernelEsid = GET_ESID(VMALLOCBASE),
22 .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
23 },
24
25 .xRanges = {
26 { .xPages = HvPagesToMap,
27 .xOffset = 0,
28 .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
29 },
30 },
31};
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/ppc64/kernel/maple_pci.c
deleted file mode 100644
index 1d297e0edfc0..000000000000
--- a/arch/ppc64/kernel/maple_pci.c
+++ /dev/null
@@ -1,521 +0,0 @@
1/*
2 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
3 * IBM Corp.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#define DEBUG
12
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/init.h>
18#include <linux/bootmem.h>
19
20#include <asm/sections.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24#include <asm/machdep.h>
25#include <asm/iommu.h>
26
27#include "pci.h"
28
29#ifdef DEBUG
30#define DBG(x...) printk(x)
31#else
32#define DBG(x...)
33#endif
34
35static struct pci_controller *u3_agp, *u3_ht;
36
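/*
 * Recursively walk the siblings and children of "node", returning the
 * highest "last bus number" found in any bridge's bus-range property
 * (or "higher" itself if none exceeds it).
 */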
37static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
38{
39	for (; node != NULL; node = node->sibling) {
40 int * bus_range;
41 unsigned int *class_code;
42 int len;
43
44 /* For PCI<->PCI bridges or CardBus bridges, we go down */
45 class_code = (unsigned int *) get_property(node, "class-code", NULL);
46 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
47 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
48 continue;
49 bus_range = (int *) get_property(node, "bus-range", &len);
50 if (bus_range != NULL && len > 2 * sizeof(int)) {
51 if (bus_range[1] > higher)
52 higher = bus_range[1];
53 }
54 higher = fixup_one_level_bus_range(node->child, higher);
55 }
56 return higher;
57}
58
59/* This routine fixes the "bus-range" property of all bridges in the
60 * system, since they tend to have their "last" member wrong on Macs.
61 *
62 * Note that the bus numbers manipulated here are OF bus numbers, they
63 * are not Linux bus numbers.
64 */
65static void __init fixup_bus_range(struct device_node *bridge)
66{
67 int * bus_range;
68 int len;
69
70 /* Lookup the "bus-range" property for the hose */
71 bus_range = (int *) get_property(bridge, "bus-range", &len);
72 if (bus_range == NULL || len < 2 * sizeof(int)) {
73 printk(KERN_WARNING "Can't get bus-range for %s\n",
74 bridge->full_name);
75 return;
76 }
77 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
78}
79
80
81#define U3_AGP_CFA0(devfn, off) \
82	((1 << (unsigned long)PCI_SLOT(devfn))	\
83	| (((unsigned long)PCI_FUNC(devfn)) << 8)	\
84 | (((unsigned long)(off)) & 0xFCUL))
85
86#define U3_AGP_CFA1(bus, devfn, off) \
87 ((((unsigned long)(bus)) << 16) \
88 |(((unsigned long)(devfn)) << 8) \
89 |(((unsigned long)(off)) & 0xFCUL) \
90 |1UL)
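/*
 * CFA0 encodes a PCI type 0 configuration cycle (the device is selected by
 * a one-hot IDSEL bit); CFA1 encodes a type 1 cycle (bus << 16, devfn << 8,
 * register offset) with bit 0 set, for devices behind P2P bridges.
 */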
91
92static unsigned long u3_agp_cfg_access(struct pci_controller* hose,
93 u8 bus, u8 dev_fn, u8 offset)
94{
95 unsigned int caddr;
96
97 if (bus == hose->first_busno) {
98 if (dev_fn < (11 << 3))
99 return 0;
100 caddr = U3_AGP_CFA0(dev_fn, offset);
101 } else
102 caddr = U3_AGP_CFA1(bus, dev_fn, offset);
103
104	/* Uninorth will return garbage if we don't read back the value! */
105 do {
106 out_le32(hose->cfg_addr, caddr);
107 } while (in_le32(hose->cfg_addr) != caddr);
108
109 offset &= 0x07;
110 return ((unsigned long)hose->cfg_data) + offset;
111}
112
113static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn,
114 int offset, int len, u32 *val)
115{
116 struct pci_controller *hose;
117 unsigned long addr;
118
119 hose = pci_bus_to_host(bus);
120 if (hose == NULL)
121 return PCIBIOS_DEVICE_NOT_FOUND;
122
123 addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
124 if (!addr)
125 return PCIBIOS_DEVICE_NOT_FOUND;
126 /*
127 * Note: the caller has already checked that offset is
128 * suitably aligned and that len is 1, 2 or 4.
129 */
130 switch (len) {
131 case 1:
132 *val = in_8((u8 *)addr);
133 break;
134 case 2:
135 *val = in_le16((u16 *)addr);
136 break;
137 default:
138 *val = in_le32((u32 *)addr);
139 break;
140 }
141 return PCIBIOS_SUCCESSFUL;
142}
143
144static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn,
145 int offset, int len, u32 val)
146{
147 struct pci_controller *hose;
148 unsigned long addr;
149
150 hose = pci_bus_to_host(bus);
151 if (hose == NULL)
152 return PCIBIOS_DEVICE_NOT_FOUND;
153
154 addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
155 if (!addr)
156 return PCIBIOS_DEVICE_NOT_FOUND;
157 /*
158 * Note: the caller has already checked that offset is
159 * suitably aligned and that len is 1, 2 or 4.
160 */
161 switch (len) {
162 case 1:
163 out_8((u8 *)addr, val);
164 (void) in_8((u8 *)addr);
165 break;
166 case 2:
167 out_le16((u16 *)addr, val);
168 (void) in_le16((u16 *)addr);
169 break;
170 default:
171 out_le32((u32 *)addr, val);
172 (void) in_le32((u32 *)addr);
173 break;
174 }
175 return PCIBIOS_SUCCESSFUL;
176}
177
178static struct pci_ops u3_agp_pci_ops =
179{
180 u3_agp_read_config,
181 u3_agp_write_config
182};
183
184
185#define U3_HT_CFA0(devfn, off) \
186	((((unsigned long)(devfn)) << 8) | (off))
187#define U3_HT_CFA1(bus, devfn, off) \
188 (U3_HT_CFA0(devfn, off) \
189 + (((unsigned long)bus) << 16) \
190 + 0x01000000UL)
191
192static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
193 u8 bus, u8 devfn, u8 offset)
194{
195 if (bus == hose->first_busno) {
196 if (PCI_SLOT(devfn) == 0)
197 return 0;
198 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
199 } else
200 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
201}
202
203static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
204 int offset, int len, u32 *val)
205{
206 struct pci_controller *hose;
207 unsigned long addr;
208
209 hose = pci_bus_to_host(bus);
210 if (hose == NULL)
211 return PCIBIOS_DEVICE_NOT_FOUND;
212
213 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
214 if (!addr)
215 return PCIBIOS_DEVICE_NOT_FOUND;
216
217 /*
218 * Note: the caller has already checked that offset is
219 * suitably aligned and that len is 1, 2 or 4.
220 */
221 switch (len) {
222 case 1:
223 *val = in_8((u8 *)addr);
224 break;
225 case 2:
226 *val = in_le16((u16 *)addr);
227 break;
228 default:
229 *val = in_le32((u32 *)addr);
230 break;
231 }
232 return PCIBIOS_SUCCESSFUL;
233}
234
235static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
236 int offset, int len, u32 val)
237{
238 struct pci_controller *hose;
239 unsigned long addr;
240
241 hose = pci_bus_to_host(bus);
242 if (hose == NULL)
243 return PCIBIOS_DEVICE_NOT_FOUND;
244
245 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
246 if (!addr)
247 return PCIBIOS_DEVICE_NOT_FOUND;
248 /*
249 * Note: the caller has already checked that offset is
250 * suitably aligned and that len is 1, 2 or 4.
251 */
252 switch (len) {
253 case 1:
254 out_8((u8 *)addr, val);
255 (void) in_8((u8 *)addr);
256 break;
257 case 2:
258 out_le16((u16 *)addr, val);
259 (void) in_le16((u16 *)addr);
260 break;
261 default:
262 out_le32((u32 *)addr, val);
263 (void) in_le32((u32 *)addr);
264 break;
265 }
266 return PCIBIOS_SUCCESSFUL;
267}
268
269static struct pci_ops u3_ht_pci_ops =
270{
271 u3_ht_read_config,
272 u3_ht_write_config
273};
274
275static void __init setup_u3_agp(struct pci_controller* hose)
276{
277	/* On G5, we move AGP up to a high bus number so we don't need
278	 * to reassign bus numbers for HT. If we ever have P2P bridges
279	 * on AGP, we'll have to move pci_assign_all_busses into the
280	 * pci_controller structure so we can enable it for AGP and not
281	 * for HT children.
282	 * We hard-code the addresses because of the different size of
283	 * the reg address cell; we shall fix that by killing struct
284	 * reg_property and using some accessor functions instead.
285	 */
286 hose->first_busno = 0xf0;
287 hose->last_busno = 0xff;
288 hose->ops = &u3_agp_pci_ops;
289 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
290 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
291
292 u3_agp = hose;
293}
294
295static void __init setup_u3_ht(struct pci_controller* hose)
296{
297 hose->ops = &u3_ht_pci_ops;
298
299	/* We hard-code the address because of the different size of
300	 * the reg address cell; we shall fix that by killing struct
301	 * reg_property and using some accessor functions instead.
302	 */
303 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
304
305 hose->first_busno = 0;
306 hose->last_busno = 0xef;
307
308 u3_ht = hose;
309}
310
311static int __init add_bridge(struct device_node *dev)
312{
313 int len;
314 struct pci_controller *hose;
315 char* disp_name;
316 int *bus_range;
317 int primary = 1;
318 struct property *of_prop;
319
320 DBG("Adding PCI host bridge %s\n", dev->full_name);
321
322 bus_range = (int *) get_property(dev, "bus-range", &len);
323 if (bus_range == NULL || len < 2 * sizeof(int)) {
324 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
325 dev->full_name);
326 }
327
328 hose = alloc_bootmem(sizeof(struct pci_controller));
329 if (hose == NULL)
330 return -ENOMEM;
331 pci_setup_pci_controller(hose);
332
333 hose->arch_data = dev;
334 hose->first_busno = bus_range ? bus_range[0] : 0;
335 hose->last_busno = bus_range ? bus_range[1] : 0xff;
336
337 of_prop = alloc_bootmem(sizeof(struct property) +
338 sizeof(hose->global_number));
339 if (of_prop) {
340 memset(of_prop, 0, sizeof(struct property));
341 of_prop->name = "linux,pci-domain";
342 of_prop->length = sizeof(hose->global_number);
343 of_prop->value = (unsigned char *)&of_prop[1];
344 memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
345 prom_add_property(dev, of_prop);
346 }
347
348 disp_name = NULL;
349 if (device_is_compatible(dev, "u3-agp")) {
350 setup_u3_agp(hose);
351 disp_name = "U3-AGP";
352 primary = 0;
353 } else if (device_is_compatible(dev, "u3-ht")) {
354 setup_u3_ht(hose);
355 disp_name = "U3-HT";
356 primary = 1;
357 }
358 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
359 disp_name, hose->first_busno, hose->last_busno);
360
361 /* Interpret the "ranges" property */
362 /* This also maps the I/O region and sets isa_io/mem_base */
363 pci_process_bridge_OF_ranges(hose, dev);
364 pci_setup_phb_io(hose, primary);
365
366 /* Fixup "bus-range" OF property */
367 fixup_bus_range(dev);
368
369 return 0;
370}
371
372
373void __init maple_pcibios_fixup(void)
374{
375 struct pci_dev *dev = NULL;
376
377 DBG(" -> maple_pcibios_fixup\n");
378
379 for_each_pci_dev(dev)
380 pci_read_irq_line(dev);
381
382 /* Do the mapping of the IO space */
383 phbs_remap_io();
384
385 DBG(" <- maple_pcibios_fixup\n");
386}
387
388static void __init maple_fixup_phb_resources(void)
389{
390 struct pci_controller *hose, *tmp;
391
392 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
393 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
394 hose->io_resource.start += offset;
395 hose->io_resource.end += offset;
396 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
397 hose->global_number,
398 hose->io_resource.start, hose->io_resource.end);
399 }
400}
401
402void __init maple_pci_init(void)
403{
404 struct device_node *np, *root;
405 struct device_node *ht = NULL;
406
407	/* Probe the root PCI hosts, that is, on U3, the AGP host and the
408	 * HyperTransport host. The latter is "kept" around and added
409	 * last, as its resource management relies on the AGP resources
410	 * having been set up first.
411	 */
412 root = of_find_node_by_path("/");
413 if (root == NULL) {
414 printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n");
415 return;
416 }
417 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
418 if (np->name == NULL)
419 continue;
420 if (strcmp(np->name, "pci") == 0) {
421 if (add_bridge(np) == 0)
422 of_node_get(np);
423 }
424 if (strcmp(np->name, "ht") == 0) {
425 of_node_get(np);
426 ht = np;
427 }
428 }
429 of_node_put(root);
430
431	/* Now set up the HyperTransport host, if we found one
432	 */
433 if (ht && add_bridge(ht) != 0)
434 of_node_put(ht);
435
436	/* Fix up the IO resources on our host bridges, as the common code
437	 * does it only for children of the host bridges
438 */
439 maple_fixup_phb_resources();
440
441 /* Setup the linkage between OF nodes and PHBs */
442 pci_devs_phb_init();
443
444 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
445	 * assume there is no P2P bridge on the AGP bus, which should
446	 * hopefully be a safe assumption.
447 */
448 if (u3_agp) {
449 struct device_node *np = u3_agp->arch_data;
450 PCI_DN(np)->busno = 0xf0;
451 for (np = np->child; np; np = np->sibling)
452 PCI_DN(np)->busno = 0xf0;
453 }
454
455	/* Tell pci.c to use the common resource allocation mechanism */
456 pci_probe_only = 0;
457
458 /* Allow all IO */
459 io_page_mask = -1;
460}
461
462int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
463{
464 struct device_node *np;
465 int irq = channel ? 15 : 14;
466
467 if (pdev->vendor != PCI_VENDOR_ID_AMD ||
468 pdev->device != PCI_DEVICE_ID_AMD_8111_IDE)
469 return irq;
470
471 np = pci_device_to_OF_node(pdev);
472 if (np == NULL)
473 return irq;
474 if (np->n_intrs < 2)
475 return irq;
476 return np->intrs[channel & 0x1].line;
477}
478
479/* XXX: To remove once all firmwares are ok */
480static void fixup_maple_ide(struct pci_dev* dev)
481{
482#if 0 /* Enable this to enable IDE port 0 */
483 {
484 u8 v;
485
486 pci_read_config_byte(dev, 0x40, &v);
487 v |= 2;
488 pci_write_config_byte(dev, 0x40, v);
489 }
490#endif
491#if 0 /* fix bus master base */
492 pci_write_config_dword(dev, 0x20, 0xcc01);
493 printk("old ide resource: %lx -> %lx \n",
494 dev->resource[4].start, dev->resource[4].end);
495 dev->resource[4].start = 0xcc00;
496 dev->resource[4].end = 0xcc10;
497#endif
498#if 1 /* Enable this to fixup IDE sense/polarity of irqs in IO-APICs */
499 {
500 struct pci_dev *apicdev;
501 u32 v;
502
503 apicdev = pci_get_slot (dev->bus, PCI_DEVFN(5,0));
504 if (apicdev == NULL)
505 printk("IDE Fixup IRQ: Can't find IO-APIC !\n");
506 else {
507 pci_write_config_byte(apicdev, 0xf2, 0x10 + 2*14);
508 pci_read_config_dword(apicdev, 0xf4, &v);
509 v &= ~0x00000022;
510 pci_write_config_dword(apicdev, 0xf4, v);
511 pci_write_config_byte(apicdev, 0xf2, 0x10 + 2*15);
512 pci_read_config_dword(apicdev, 0xf4, &v);
513 v &= ~0x00000022;
514 pci_write_config_dword(apicdev, 0xf4, v);
515 pci_dev_put(apicdev);
516 }
517 }
518#endif
519}
520DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE,
521 fixup_maple_ide);
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
deleted file mode 100644
index fc0567498a3a..000000000000
--- a/arch/ppc64/kernel/maple_setup.c
+++ /dev/null
@@ -1,300 +0,0 @@
1/*
2 * arch/ppc64/kernel/maple_setup.c
3 *
4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
5 * IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 */
13
14#define DEBUG
15
16#include <linux/config.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/tty.h>
29#include <linux/string.h>
30#include <linux/delay.h>
31#include <linux/ioport.h>
32#include <linux/major.h>
33#include <linux/initrd.h>
34#include <linux/vt_kern.h>
35#include <linux/console.h>
36#include <linux/ide.h>
37#include <linux/pci.h>
38#include <linux/adb.h>
39#include <linux/cuda.h>
40#include <linux/pmu.h>
41#include <linux/irq.h>
42#include <linux/seq_file.h>
43#include <linux/root_dev.h>
44#include <linux/serial.h>
45#include <linux/smp.h>
46
47#include <asm/processor.h>
48#include <asm/sections.h>
49#include <asm/prom.h>
50#include <asm/system.h>
51#include <asm/pgtable.h>
52#include <asm/bitops.h>
53#include <asm/io.h>
54#include <asm/pci-bridge.h>
55#include <asm/iommu.h>
56#include <asm/machdep.h>
57#include <asm/dma.h>
58#include <asm/cputable.h>
59#include <asm/time.h>
60#include <asm/of_device.h>
61#include <asm/lmb.h>
62
63#include "mpic.h"
64
65#ifdef DEBUG
66#define DBG(fmt...) udbg_printf(fmt)
67#else
68#define DBG(fmt...)
69#endif
70
71extern int maple_set_rtc_time(struct rtc_time *tm);
72extern void maple_get_rtc_time(struct rtc_time *tm);
73extern void maple_get_boot_time(struct rtc_time *tm);
74extern void maple_calibrate_decr(void);
75extern void maple_pci_init(void);
76extern void maple_pcibios_fixup(void);
77extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
78extern void generic_find_legacy_serial_ports(u64 *physport,
79 unsigned int *default_speed);
80
81static void maple_restart(char *cmd)
82{
83 unsigned int maple_nvram_base;
84 unsigned int maple_nvram_offset;
85 unsigned int maple_nvram_command;
86 struct device_node *rtcs;
87
88 /* find NVRAM device */
89 rtcs = find_compatible_devices("nvram", "AMD8111");
90 if (rtcs && rtcs->addrs) {
91 maple_nvram_base = rtcs->addrs[0].address;
92 } else {
93 printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
94 printk(KERN_EMERG "Maple: Manual Restart Required\n");
95 return;
96 }
97
98 /* find service processor device */
99 rtcs = find_devices("service-processor");
100 if (!rtcs) {
101 printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
102 printk(KERN_EMERG "Maple: Manual Restart Required\n");
103 return;
104 }
105 maple_nvram_offset = *(unsigned int*) get_property(rtcs,
106 "restart-addr", NULL);
107 maple_nvram_command = *(unsigned int*) get_property(rtcs,
108 "restart-value", NULL);
109
110 /* send command */
111 outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
112 for (;;) ;
113}
114
115static void maple_power_off(void)
116{
117 unsigned int maple_nvram_base;
118 unsigned int maple_nvram_offset;
119 unsigned int maple_nvram_command;
120 struct device_node *rtcs;
121
122 /* find NVRAM device */
123 rtcs = find_compatible_devices("nvram", "AMD8111");
124 if (rtcs && rtcs->addrs) {
125 maple_nvram_base = rtcs->addrs[0].address;
126 } else {
127 printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
128 printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
129 return;
130 }
131
132 /* find service processor device */
133 rtcs = find_devices("service-processor");
134 if (!rtcs) {
135 printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
136 printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
137 return;
138 }
139 maple_nvram_offset = *(unsigned int*) get_property(rtcs,
140 "power-off-addr", NULL);
141 maple_nvram_command = *(unsigned int*) get_property(rtcs,
142 "power-off-value", NULL);
143
144 /* send command */
145 outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset);
146 for (;;) ;
147}
148
149static void maple_halt(void)
150{
151 maple_power_off();
152}
153
154#ifdef CONFIG_SMP
155struct smp_ops_t maple_smp_ops = {
156 .probe = smp_mpic_probe,
157 .message_pass = smp_mpic_message_pass,
158 .kick_cpu = smp_generic_kick_cpu,
159 .setup_cpu = smp_mpic_setup_cpu,
160 .give_timebase = smp_generic_give_timebase,
161 .take_timebase = smp_generic_take_timebase,
162};
163#endif /* CONFIG_SMP */
164
165void __init maple_setup_arch(void)
166{
167 /* init to some ~sane value until calibrate_delay() runs */
168 loops_per_jiffy = 50000000;
169
170 /* Setup SMP callback */
171#ifdef CONFIG_SMP
172 smp_ops = &maple_smp_ops;
173#endif
174 /* Lookup PCI hosts */
175 maple_pci_init();
176
177#ifdef CONFIG_DUMMY_CONSOLE
178 conswitchp = &dummy_con;
179#endif
180
181 printk(KERN_INFO "Using native/NAP idle loop\n");
182}
183
184/*
185 * Early initialization.
186 */
187static void __init maple_init_early(void)
188{
189 unsigned int default_speed;
190 u64 physport;
191
192 DBG(" -> maple_init_early\n");
193
194	/* Initialize the hash table; from now on we can take hash faults
195	 * and call ioremap
196 */
197 hpte_init_native();
198
199 /* Find the serial port */
200 generic_find_legacy_serial_ports(&physport, &default_speed);
201
202 DBG("phys port addr: %lx\n", (long)physport);
203
204 if (physport) {
205 void *comport;
206 /* Map the uart for udbg. */
207 comport = (void *)ioremap(physport, 16);
208 udbg_init_uart(comport, default_speed);
209
210 DBG("Hello World !\n");
211 }
212
213 /* Setup interrupt mapping options */
214 ppc64_interrupt_controller = IC_OPEN_PIC;
215
216 iommu_init_early_u3();
217
218 DBG(" <- maple_init_early\n");
219}
220
221
222static __init void maple_init_IRQ(void)
223{
224 struct device_node *root;
225 unsigned int *opprop;
226 unsigned long opic_addr;
227 struct mpic *mpic;
228 unsigned char senses[128];
229 int n;
230
231 DBG(" -> maple_init_IRQ\n");
232
233	/* XXX: Non-standard; replace this with a proper openpic/mpic node
234	 * in the device tree. Find the OpenPIC if present */
235 root = of_find_node_by_path("/");
236 opprop = (unsigned int *) get_property(root,
237 "platform-open-pic", NULL);
238	if (opprop == NULL)
239		panic("OpenPIC not found!\n");
240
241 n = prom_n_addr_cells(root);
242 for (opic_addr = 0; n > 0; --n)
243 opic_addr = (opic_addr << 32) + *opprop++;
244 of_node_put(root);
245
246 /* Obtain sense values from device-tree */
247 prom_get_irq_senses(senses, 0, 128);
248
249 mpic = mpic_alloc(opic_addr,
250 MPIC_PRIMARY | MPIC_BIG_ENDIAN |
251 MPIC_BROKEN_U3 | MPIC_WANTS_RESET,
252 0, 0, 128, 128, senses, 128, "U3-MPIC");
253 BUG_ON(mpic == NULL);
254 mpic_init(mpic);
255
256 DBG(" <- maple_init_IRQ\n");
257}
258
259static void __init maple_progress(char *s, unsigned short hex)
260{
261 printk("*** %04x : %s\n", hex, s ? s : "");
262}
263
264
265/*
266 * Called very early, MMU is off, device-tree isn't unflattened
267 */
268static int __init maple_probe(int platform)
269{
270 if (platform != PLATFORM_MAPLE)
271 return 0;
272 /*
273 * On U3, the DART (iommu) must be allocated now since it
274 * has an impact on htab_initialize (due to the large page it
275 * occupies having to be broken up so the DART itself is not
275	 * part of the cacheable linear mapping).
277 */
278 alloc_u3_dart_table();
279
280 return 1;
281}
282
283struct machdep_calls __initdata maple_md = {
284 .probe = maple_probe,
285 .setup_arch = maple_setup_arch,
286 .init_early = maple_init_early,
287 .init_IRQ = maple_init_IRQ,
288 .get_irq = mpic_get_irq,
289 .pcibios_fixup = maple_pcibios_fixup,
290 .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
291 .restart = maple_restart,
292 .power_off = maple_power_off,
293 .halt = maple_halt,
294 .get_boot_time = maple_get_boot_time,
295 .set_rtc_time = maple_set_rtc_time,
296 .get_rtc_time = maple_get_rtc_time,
297 .calibrate_decr = generic_calibrate_decr,
298 .progress = maple_progress,
299 .idle_loop = native_idle,
300};
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/ppc64/kernel/maple_time.c
deleted file mode 100644
index d65210abcd03..000000000000
--- a/arch/ppc64/kernel/maple_time.c
+++ /dev/null
@@ -1,175 +0,0 @@
1/*
2 * arch/ppc64/kernel/maple_time.c
3 *
4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
5 * IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 */
13
14#undef DEBUG
15
16#include <linux/config.h>
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/param.h>
21#include <linux/string.h>
22#include <linux/mm.h>
23#include <linux/init.h>
24#include <linux/time.h>
25#include <linux/adb.h>
26#include <linux/pmu.h>
27#include <linux/interrupt.h>
28#include <linux/mc146818rtc.h>
29#include <linux/bcd.h>
30
31#include <asm/sections.h>
32#include <asm/prom.h>
33#include <asm/system.h>
34#include <asm/io.h>
35#include <asm/pgtable.h>
36#include <asm/machdep.h>
37#include <asm/time.h>
38
39#ifdef DEBUG
40#define DBG(x...) printk(x)
41#else
42#define DBG(x...)
43#endif
44
45extern void GregorianDay(struct rtc_time * tm);
46
47static int maple_rtc_addr;
48
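/*
 * The RTC is accessed MC146818-style through an index/data port pair: write
 * the register number to maple_rtc_addr, then read or write the data byte
 * at maple_rtc_addr + 1.
 */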
49static int maple_clock_read(int addr)
50{
51 outb_p(addr, maple_rtc_addr);
52 return inb_p(maple_rtc_addr+1);
53}
54
55static void maple_clock_write(unsigned long val, int addr)
56{
57 outb_p(addr, maple_rtc_addr);
58 outb_p(val, maple_rtc_addr+1);
59}
60
61void maple_get_rtc_time(struct rtc_time *tm)
62{
63 int uip, i;
64
65 /* The Linux interpretation of the CMOS clock register contents:
66 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
67 * RTC registers show the second which has precisely just started.
68 * Let's hope other operating systems interpret the RTC the same way.
69 */
70
71 /* Since the UIP flag is set for about 2.2 ms and the clock
72 * is typically written with a precision of 1 jiffy, trying
73 * to obtain a precision better than a few milliseconds is
74	 * an illusion. Only consistency is interesting; this also
75	 * allows the routine to be used for /dev/rtc without a potential
76	 * 1-second kernel busy loop triggered by any reader of /dev/rtc.
77 */
78
79 for (i = 0; i<1000000; i++) {
80 uip = maple_clock_read(RTC_FREQ_SELECT);
81 tm->tm_sec = maple_clock_read(RTC_SECONDS);
82 tm->tm_min = maple_clock_read(RTC_MINUTES);
83 tm->tm_hour = maple_clock_read(RTC_HOURS);
84 tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH);
85 tm->tm_mon = maple_clock_read(RTC_MONTH);
86 tm->tm_year = maple_clock_read(RTC_YEAR);
87 uip |= maple_clock_read(RTC_FREQ_SELECT);
88 if ((uip & RTC_UIP)==0)
89 break;
90 }
91
92 if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY)
93 || RTC_ALWAYS_BCD) {
94 BCD_TO_BIN(tm->tm_sec);
95 BCD_TO_BIN(tm->tm_min);
96 BCD_TO_BIN(tm->tm_hour);
97 BCD_TO_BIN(tm->tm_mday);
98 BCD_TO_BIN(tm->tm_mon);
99 BCD_TO_BIN(tm->tm_year);
100 }
101 if ((tm->tm_year + 1900) < 1970)
102 tm->tm_year += 100;
103
104 GregorianDay(tm);
105}
106
107int maple_set_rtc_time(struct rtc_time *tm)
108{
109 unsigned char save_control, save_freq_select;
110 int sec, min, hour, mon, mday, year;
111
112 spin_lock(&rtc_lock);
113
114 save_control = maple_clock_read(RTC_CONTROL); /* tell the clock it's being set */
115
116 maple_clock_write((save_control|RTC_SET), RTC_CONTROL);
117
118 save_freq_select = maple_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */
119
120 maple_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
121
122 sec = tm->tm_sec;
123 min = tm->tm_min;
124 hour = tm->tm_hour;
125 mon = tm->tm_mon;
126 mday = tm->tm_mday;
127 year = tm->tm_year;
128
129 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
130 BIN_TO_BCD(sec);
131 BIN_TO_BCD(min);
132 BIN_TO_BCD(hour);
133 BIN_TO_BCD(mon);
134 BIN_TO_BCD(mday);
135 BIN_TO_BCD(year);
136 }
137 maple_clock_write(sec, RTC_SECONDS);
138 maple_clock_write(min, RTC_MINUTES);
139 maple_clock_write(hour, RTC_HOURS);
140 maple_clock_write(mon, RTC_MONTH);
141 maple_clock_write(mday, RTC_DAY_OF_MONTH);
142 maple_clock_write(year, RTC_YEAR);
143
144 /* The following flags have to be released exactly in this order,
145 * otherwise the DS12887 (popular MC146818A clone with integrated
146 * battery and quartz) will not reset the oscillator and will not
147 * update precisely 500 ms later. You won't find this mentioned in
148 * the Dallas Semiconductor data sheets, but who believes data
149 * sheets anyway ... -- Markus Kuhn
150 */
151 maple_clock_write(save_control, RTC_CONTROL);
152 maple_clock_write(save_freq_select, RTC_FREQ_SELECT);
153
154 spin_unlock(&rtc_lock);
155
156 return 0;
157}
158
159void __init maple_get_boot_time(struct rtc_time *tm)
160{
161 struct device_node *rtcs;
162
163 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
164 if (rtcs && rtcs->addrs) {
165 maple_rtc_addr = rtcs->addrs[0].address;
166 printk(KERN_INFO "Maple: Found RTC at 0x%x\n", maple_rtc_addr);
167 } else {
168 maple_rtc_addr = RTC_PORT(0); /* legacy address */
169 printk(KERN_INFO "Maple: No device node for RTC, assuming "
170 "legacy address (0x%x)\n", maple_rtc_addr);
171 }
172
173 maple_get_rtc_time(tm);
174}
175
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
deleted file mode 100644
index ef4a338ebd01..000000000000
--- a/arch/ppc64/kernel/mf.c
+++ /dev/null
@@ -1,1281 +0,0 @@
1/*
2 * mf.c
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
4 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
5 *
6 * This module exists as an interface between a Linux secondary partition
7 * running on an iSeries and the primary partition's Virtual Service
8 * Processor (VSP) object. The VSP has final authority over powering on/off
9 * all partitions in the iSeries. It also provides miscellaneous low-level
10 * machine facility type operations.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/types.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/init.h>
32#include <linux/completion.h>
33#include <linux/delay.h>
34#include <linux/dma-mapping.h>
35#include <linux/bcd.h>
36
37#include <asm/time.h>
38#include <asm/uaccess.h>
39#include <asm/paca.h>
40#include <asm/iSeries/vio.h>
41#include <asm/iSeries/mf.h>
42#include <asm/iSeries/HvLpConfig.h>
43#include <asm/iSeries/ItLpQueue.h>
44
45/*
46 * This is the structure layout for the Machine Facilities LPAR event
47 * flows.
48 */
49struct vsp_cmd_data {
50 u64 token;
51 u16 cmd;
52 HvLpIndex lp_index;
53 u8 result_code;
54 u32 reserved;
55 union {
56 u64 state; /* GetStateOut */
57 u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
58 u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
59 u64 page[4]; /* GetSrcHistoryIn */
60 u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
61 SetAutoIplWhenPrimaryIplsIn,
62 WhiteButtonPowerOffIn,
63 Function08FastPowerOffIn,
64 IsSpcnRackPowerIncompleteOut */
65 struct {
66 u64 token;
67 u64 address_type;
68 u64 side;
69 u32 length;
70 u32 offset;
71 } kern; /* SetKernelImageIn, GetKernelImageIn,
72 SetKernelCmdLineIn, GetKernelCmdLineIn */
73 u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
74 u8 reserved[80];
75 } sub_data;
76};
77
78struct vsp_rsp_data {
79 struct completion com;
80 struct vsp_cmd_data *response;
81};
82
83struct alloc_data {
84 u16 size;
85 u16 type;
86 u32 count;
87 u16 reserved1;
88 u8 reserved2;
89 HvLpIndex target_lp;
90};
91
92struct ce_msg_data;
93
94typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
95
96struct ce_msg_comp_data {
97 ce_msg_comp_hdlr handler;
98 void *token;
99};
100
101struct ce_msg_data {
102 u8 ce_msg[12];
103 char reserved[4];
104 struct ce_msg_comp_data *completion;
105};
106
107struct io_mf_lp_event {
108 struct HvLpEvent hp_lp_event;
109 u16 subtype_result_code;
110 u16 reserved1;
111 u32 reserved2;
112 union {
113 struct alloc_data alloc;
114 struct ce_msg_data ce_msg;
115 struct vsp_cmd_data vsp_cmd;
116 } data;
117};
118
119#define subtype_data(a, b, c, d) \
120 (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
121
122/*
123 * All outgoing event traffic is kept on a FIFO queue. The first
124 * pointer points to the one that is outstanding, and all new
125 * requests get stuck on the end. Also, we keep a certain number of
126 * preallocated pending events so that we can operate very early in
127 * the boot up sequence (before kmalloc is ready).
128 */
129struct pending_event {
130 struct pending_event *next;
131 struct io_mf_lp_event event;
132 MFCompleteHandler hdlr;
133 char dma_data[72];
134 unsigned dma_data_length;
135 unsigned remote_address;
136};
137static spinlock_t pending_event_spinlock;
138static struct pending_event *pending_event_head;
139static struct pending_event *pending_event_tail;
140static struct pending_event *pending_event_avail;
141static struct pending_event pending_event_prealloc[16];
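/*
 * Sixteen events are preallocated so that callers such as
 * mf_display_progress() work before kmalloc() is available; mf_init()
 * seeds them onto the available list.
 */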
142
143/*
144 * Put a pending event onto the available queue, so it can get reused.
146 * Attention! You must hold the pending_event_spinlock before calling!
146 */
147static void free_pending_event(struct pending_event *ev)
148{
149 if (ev != NULL) {
150 ev->next = pending_event_avail;
151 pending_event_avail = ev;
152 }
153}
154
155/*
156 * Enqueue the outbound event onto the queue. If the queue was
157 * empty to begin with, we must also issue it via the Hypervisor
158 * interface. There is a section of code below that will touch
159 * the queue head pointer without the protection of the pending_event_spinlock.
160 * This is OK, because we know that nobody else will be modifying
161 * the head pointer when we do this.
162 */
163static int signal_event(struct pending_event *ev)
164{
165 int rc = 0;
166 unsigned long flags;
167 int go = 1;
168 struct pending_event *ev1;
169 HvLpEvent_Rc hv_rc;
170
171 /* enqueue the event */
172 if (ev != NULL) {
173 ev->next = NULL;
174 spin_lock_irqsave(&pending_event_spinlock, flags);
175 if (pending_event_head == NULL)
176 pending_event_head = ev;
177 else {
178 go = 0;
179 pending_event_tail->next = ev;
180 }
181 pending_event_tail = ev;
182 spin_unlock_irqrestore(&pending_event_spinlock, flags);
183 }
184
185 /* send the event */
186 while (go) {
187 go = 0;
188
189 /* any DMA data to send beforehand? */
190 if (pending_event_head->dma_data_length > 0)
191 HvCallEvent_dmaToSp(pending_event_head->dma_data,
192 pending_event_head->remote_address,
193 pending_event_head->dma_data_length,
194 HvLpDma_Direction_LocalToRemote);
195
196 hv_rc = HvCallEvent_signalLpEvent(
197 &pending_event_head->event.hp_lp_event);
198 if (hv_rc != HvLpEvent_Rc_Good) {
199 printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
200 "failed with %d\n", (int)hv_rc);
201
202 spin_lock_irqsave(&pending_event_spinlock, flags);
203 ev1 = pending_event_head;
204 pending_event_head = pending_event_head->next;
205 if (pending_event_head != NULL)
206 go = 1;
207 spin_unlock_irqrestore(&pending_event_spinlock, flags);
208
209 if (ev1 == ev)
210 rc = -EIO;
211 else if (ev1->hdlr != NULL)
212 (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
213
214 spin_lock_irqsave(&pending_event_spinlock, flags);
215 free_pending_event(ev1);
216 spin_unlock_irqrestore(&pending_event_spinlock, flags);
217 }
218 }
219
220 return rc;
221}
222
223/*
224 * Allocate a new pending_event structure, and initialize it.
225 */
226static struct pending_event *new_pending_event(void)
227{
228 struct pending_event *ev = NULL;
229 HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
230 unsigned long flags;
231 struct HvLpEvent *hev;
232
233 spin_lock_irqsave(&pending_event_spinlock, flags);
234 if (pending_event_avail != NULL) {
235 ev = pending_event_avail;
236 pending_event_avail = pending_event_avail->next;
237 }
238 spin_unlock_irqrestore(&pending_event_spinlock, flags);
239 if (ev == NULL) {
240 ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
241 if (ev == NULL) {
242 printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
243 sizeof(struct pending_event));
244 return NULL;
245 }
246 }
247 memset(ev, 0, sizeof(struct pending_event));
248 hev = &ev->event.hp_lp_event;
249 hev->xFlags.xValid = 1;
250 hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
251 hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
252 hev->xFlags.xFunction = HvLpEvent_Function_Int;
253 hev->xType = HvLpEvent_Type_MachineFac;
254 hev->xSourceLp = HvLpConfig_getLpIndex();
255 hev->xTargetLp = primary_lp;
256 hev->xSizeMinus1 = sizeof(ev->event) - 1;
257 hev->xRc = HvLpEvent_Rc_Good;
258 hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
259 HvLpEvent_Type_MachineFac);
260 hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
261 HvLpEvent_Type_MachineFac);
262
263 return ev;
264}
265
266static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
267{
268 struct pending_event *ev = new_pending_event();
269 int rc;
270 struct vsp_rsp_data response;
271
272 if (ev == NULL)
273 return -ENOMEM;
274
275 init_completion(&response.com);
276 response.response = vsp_cmd;
277 ev->event.hp_lp_event.xSubtype = 6;
278 ev->event.hp_lp_event.x.xSubtypeData =
279 subtype_data('M', 'F', 'V', 'I');
280 ev->event.data.vsp_cmd.token = (u64)&response;
281 ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
282 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
283 ev->event.data.vsp_cmd.result_code = 0xFF;
284 ev->event.data.vsp_cmd.reserved = 0;
285 memcpy(&(ev->event.data.vsp_cmd.sub_data),
286 &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
287 mb();
288
289 rc = signal_event(ev);
290 if (rc == 0)
291 wait_for_completion(&response.com);
292 return rc;
293}
294
295
296/*
297 * Send a 12-byte CE message to the primary partition VSP object
298 */
299static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
300{
301 struct pending_event *ev = new_pending_event();
302
303 if (ev == NULL)
304 return -ENOMEM;
305
306 ev->event.hp_lp_event.xSubtype = 0;
307 ev->event.hp_lp_event.x.xSubtypeData =
308 subtype_data('M', 'F', 'C', 'E');
309 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
310 ev->event.data.ce_msg.completion = completion;
311 return signal_event(ev);
312}
313
314/*
315 * Send a 12-byte CE message (with no data) to the primary partition VSP object
316 */
317static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
318{
319 u8 ce_msg[12];
320
321 memset(ce_msg, 0, sizeof(ce_msg));
322 ce_msg[3] = ce_op;
323 return signal_ce_msg(ce_msg, completion);
324}
325
326/*
327 * Send a 12-byte CE message and DMA data to the primary partition VSP object
328 */
329static int dma_and_signal_ce_msg(char *ce_msg,
330 struct ce_msg_comp_data *completion, void *dma_data,
331 unsigned dma_data_length, unsigned remote_address)
332{
333 struct pending_event *ev = new_pending_event();
334
335 if (ev == NULL)
336 return -ENOMEM;
337
338 ev->event.hp_lp_event.xSubtype = 0;
339 ev->event.hp_lp_event.x.xSubtypeData =
340 subtype_data('M', 'F', 'C', 'E');
341 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
342 ev->event.data.ce_msg.completion = completion;
343 memcpy(ev->dma_data, dma_data, dma_data_length);
344 ev->dma_data_length = dma_data_length;
345 ev->remote_address = remote_address;
346 return signal_event(ev);
347}
348
349/*
350 * Initiate a nice (hopefully) shutdown of Linux. We are simply
351 * going to try to send the init process a SIGINT signal. If
352 * this fails (why?), we'll force it off in a not-so-nice
353 * manner.
354 */
355static int shutdown(void)
356{
357 int rc = kill_proc(1, SIGINT, 1);
358
359 if (rc) {
360 printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
361 "hard shutdown commencing\n", rc);
362 mf_power_off();
363 } else
364 printk(KERN_INFO "mf.c: init has been successfully notified "
365 "to proceed with shutdown\n");
366 return rc;
367}
368
369/*
370 * The primary partition VSP object is sending us a new
371 * event flow. Handle it...
372 */
373static void handle_int(struct io_mf_lp_event *event)
374{
375 struct ce_msg_data *ce_msg_data;
376 struct ce_msg_data *pce_msg_data;
377 unsigned long flags;
378 struct pending_event *pev;
379
380 /* ack the interrupt */
381 event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
382 HvCallEvent_ackLpEvent(&event->hp_lp_event);
383
384 /* process interrupt */
385 switch (event->hp_lp_event.xSubtype) {
386 case 0: /* CE message */
387 ce_msg_data = &event->data.ce_msg;
388 switch (ce_msg_data->ce_msg[3]) {
389 case 0x5B: /* power control notification */
390 if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
391 printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
392 if (shutdown() == 0)
393 signal_ce_msg_simple(0xDB, NULL);
394 }
395 break;
396 case 0xC0: /* get time */
397 spin_lock_irqsave(&pending_event_spinlock, flags);
398 pev = pending_event_head;
399 if (pev != NULL)
400 pending_event_head = pending_event_head->next;
401 spin_unlock_irqrestore(&pending_event_spinlock, flags);
402 if (pev == NULL)
403 break;
404 pce_msg_data = &pev->event.data.ce_msg;
405 if (pce_msg_data->ce_msg[3] != 0x40)
406 break;
407 if (pce_msg_data->completion != NULL) {
408 ce_msg_comp_hdlr handler =
409 pce_msg_data->completion->handler;
410 void *token = pce_msg_data->completion->token;
411
412 if (handler != NULL)
413 (*handler)(token, ce_msg_data);
414 }
415 spin_lock_irqsave(&pending_event_spinlock, flags);
416 free_pending_event(pev);
417 spin_unlock_irqrestore(&pending_event_spinlock, flags);
418 /* send next waiting event */
419 if (pending_event_head != NULL)
420 signal_event(NULL);
421 break;
422 }
423 break;
424 case 1: /* IT sys shutdown */
425 printk(KERN_INFO "mf.c: Commencing system shutdown\n");
426 shutdown();
427 break;
428 }
429}
430
431/*
432 * The primary partition VSP object is acknowledging the receipt
433 * of a flow we sent to them. If there are other flows queued
434 * up, we must send another one now...
435 */
436static void handle_ack(struct io_mf_lp_event *event)
437{
438 unsigned long flags;
439 struct pending_event *two = NULL;
440 unsigned long free_it = 0;
441 struct ce_msg_data *ce_msg_data;
442 struct ce_msg_data *pce_msg_data;
443 struct vsp_rsp_data *rsp;
444
445 /* handle current event */
446 if (pending_event_head == NULL) {
447 printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
448 return;
449 }
450
451 switch (event->hp_lp_event.xSubtype) {
452 case 0: /* CE msg */
453 ce_msg_data = &event->data.ce_msg;
454 if (ce_msg_data->ce_msg[3] != 0x40) {
455 free_it = 1;
456 break;
457 }
458 if (ce_msg_data->ce_msg[2] == 0)
459 break;
460 free_it = 1;
461 pce_msg_data = &pending_event_head->event.data.ce_msg;
462 if (pce_msg_data->completion != NULL) {
463 ce_msg_comp_hdlr handler =
464 pce_msg_data->completion->handler;
465 void *token = pce_msg_data->completion->token;
466
467 if (handler != NULL)
468 (*handler)(token, ce_msg_data);
469 }
470 break;
471 case 4: /* allocate */
472 case 5: /* deallocate */
473 if (pending_event_head->hdlr != NULL)
474 (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
475 free_it = 1;
476 break;
477 case 6:
478 free_it = 1;
479 rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
480 if (rsp == NULL) {
481 printk(KERN_ERR "mf.c: no rsp\n");
482 break;
483 }
484 if (rsp->response != NULL)
485 memcpy(rsp->response, &event->data.vsp_cmd,
486 sizeof(event->data.vsp_cmd));
487 complete(&rsp->com);
488 break;
489 }
490
491 /* remove from queue */
492 spin_lock_irqsave(&pending_event_spinlock, flags);
493 if ((pending_event_head != NULL) && (free_it == 1)) {
494 struct pending_event *oldHead = pending_event_head;
495
496 pending_event_head = pending_event_head->next;
497 two = pending_event_head;
498 free_pending_event(oldHead);
499 }
500 spin_unlock_irqrestore(&pending_event_spinlock, flags);
501
502 /* send next waiting event */
503 if (two != NULL)
504 signal_event(NULL);
505}
506
507/*
508 * This is the generic event handler we are registering with
509 * the Hypervisor. Ensure the flow is for us, and then
510 * parse it enough to know whether it is an interrupt or an
511 * acknowledgement.
512 */
513static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
514{
515 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
516 switch(event->xFlags.xFunction) {
517 case HvLpEvent_Function_Ack:
518 handle_ack((struct io_mf_lp_event *)event);
519 break;
520 case HvLpEvent_Function_Int:
521 handle_int((struct io_mf_lp_event *)event);
522 break;
523 default:
524 printk(KERN_ERR "mf.c: non ack/int event received\n");
525 break;
526 }
527 } else
528 printk(KERN_ERR "mf.c: alien event received\n");
529}
530
531/*
532 * Global kernel interface to allocate and seed events into the
533 * Hypervisor.
534 */
535void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
536 unsigned size, unsigned count, MFCompleteHandler hdlr,
537 void *user_token)
538{
539 struct pending_event *ev = new_pending_event();
540 int rc;
541
542 if (ev == NULL) {
543 rc = -ENOMEM;
544 } else {
545 ev->event.hp_lp_event.xSubtype = 4;
546 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
547 ev->event.hp_lp_event.x.xSubtypeData =
548 subtype_data('M', 'F', 'M', 'A');
549 ev->event.data.alloc.target_lp = target_lp;
550 ev->event.data.alloc.type = type;
551 ev->event.data.alloc.size = size;
552 ev->event.data.alloc.count = count;
553 ev->hdlr = hdlr;
554 rc = signal_event(ev);
555 }
556 if ((rc != 0) && (hdlr != NULL))
557 (*hdlr)(user_token, rc);
558}
559EXPORT_SYMBOL(mf_allocate_lp_events);
560
561/*
562 * Global kernel interface to unseed and deallocate events already in
563 * Hypervisor.
564 */
565void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
566 unsigned count, MFCompleteHandler hdlr, void *user_token)
567{
568 struct pending_event *ev = new_pending_event();
569 int rc;
570
571 if (ev == NULL)
572 rc = -ENOMEM;
573 else {
574 ev->event.hp_lp_event.xSubtype = 5;
575 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
576 ev->event.hp_lp_event.x.xSubtypeData =
577 subtype_data('M', 'F', 'M', 'D');
578 ev->event.data.alloc.target_lp = target_lp;
579 ev->event.data.alloc.type = type;
580 ev->event.data.alloc.count = count;
581 ev->hdlr = hdlr;
582 rc = signal_event(ev);
583 }
584 if ((rc != 0) && (hdlr != NULL))
585 (*hdlr)(user_token, rc);
586}
587EXPORT_SYMBOL(mf_deallocate_lp_events);
588
589/*
590 * Global kernel interface to tell the VSP object in the primary
591 * partition to power this partition off.
592 */
593void mf_power_off(void)
594{
595 printk(KERN_INFO "mf.c: Down it goes...\n");
596 signal_ce_msg_simple(0x4d, NULL);
597 for (;;)
598 ;
599}
600
601/*
602 * Global kernel interface to tell the VSP object in the primary
603 * partition to reboot this partition.
604 */
605void mf_reboot(void)
606{
607 printk(KERN_INFO "mf.c: Preparing to bounce...\n");
608 signal_ce_msg_simple(0x4e, NULL);
609 for (;;)
610 ;
611}
612
613/*
614 * Display a single word SRC onto the VSP control panel.
615 */
616void mf_display_src(u32 word)
617{
618 u8 ce[12];
619
620 memset(ce, 0, sizeof(ce));
621 ce[3] = 0x4a;
622 ce[7] = 0x01;
623 ce[8] = word >> 24;
624 ce[9] = word >> 16;
625 ce[10] = word >> 8;
626 ce[11] = word;
627 signal_ce_msg(ce, NULL);
628}
629
630/*
631 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
632 */
633void mf_display_progress(u16 value)
634{
635 u8 ce[12];
636 u8 src[72];
637
638 memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
639 memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
640 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
641 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
642 "\x00\x00\x00\x00PROGxxxx ",
643 72);
644 src[6] = value >> 8;
645 src[7] = value & 255;
646 src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
647 src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
648 src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
649 src[47] = "0123456789ABCDEF"[value & 15];
650 dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
651}
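/*
 * For example, mf_display_progress(0x1234) DMAs an SRC whose text reads
 * "PROG1234" to the panel: the four trailing characters are the hex digits
 * of "value", most significant nibble first.
 */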
652
653/*
654 * Clear the VSP control panel. Used to "erase" an SRC that was
655 * previously displayed.
656 */
657void mf_clear_src(void)
658{
659 signal_ce_msg_simple(0x4b, NULL);
660}
661
662/*
663 * Initialization code here.
664 */
665void mf_init(void)
666{
667 int i;
668
669 /* initialize */
670 spin_lock_init(&pending_event_spinlock);
671 for (i = 0;
672 i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc);
673 ++i)
674 free_pending_event(&pending_event_prealloc[i]);
675 HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
676
677 /* virtual continue ack */
678 signal_ce_msg_simple(0x57, NULL);
679
680 /* initialization complete */
681 printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
682 "initialized\n");
683}
684
685struct rtc_time_data {
686 struct completion com;
687 struct ce_msg_data ce_msg;
688 int rc;
689};
690
691static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
692{
693 struct rtc_time_data *rtc = token;
694
695 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
696 rtc->rc = 0;
697 complete(&rtc->com);
698}
699
700static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
701{
702 tm->tm_wday = 0;
703 tm->tm_yday = 0;
704 tm->tm_isdst = 0;
705 if (rc) {
706 tm->tm_sec = 0;
707 tm->tm_min = 0;
708 tm->tm_hour = 0;
709 tm->tm_mday = 15;
710 tm->tm_mon = 5;
711 tm->tm_year = 52;
712 return rc;
713 }
714
715 if ((ce_msg[2] == 0xa9) ||
716 (ce_msg[2] == 0xaf)) {
717 /* TOD clock is not set */
718 tm->tm_sec = 1;
719 tm->tm_min = 1;
720 tm->tm_hour = 1;
721 tm->tm_mday = 10;
722 tm->tm_mon = 8;
723 tm->tm_year = 71;
724 mf_set_rtc(tm);
725 }
726 {
727 u8 year = ce_msg[5];
728 u8 sec = ce_msg[6];
729 u8 min = ce_msg[7];
730 u8 hour = ce_msg[8];
731 u8 day = ce_msg[10];
732 u8 mon = ce_msg[11];
733
734 BCD_TO_BIN(sec);
735 BCD_TO_BIN(min);
736 BCD_TO_BIN(hour);
737 BCD_TO_BIN(day);
738 BCD_TO_BIN(mon);
739 BCD_TO_BIN(year);
740
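		/* Two-digit BCD year: 00-69 means 2000-2069, 70-99 means
		 * 1970-1999 (tm_year counts years since 1900). */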
741 if (year <= 69)
742 year += 100;
743
744 tm->tm_sec = sec;
745 tm->tm_min = min;
746 tm->tm_hour = hour;
747 tm->tm_mday = day;
748 tm->tm_mon = mon;
749 tm->tm_year = year;
750 }
751
752 return 0;
753}
754
755int mf_get_rtc(struct rtc_time *tm)
756{
757 struct ce_msg_comp_data ce_complete;
758 struct rtc_time_data rtc_data;
759 int rc;
760
761 memset(&ce_complete, 0, sizeof(ce_complete));
762 memset(&rtc_data, 0, sizeof(rtc_data));
763 init_completion(&rtc_data.com);
764 ce_complete.handler = &get_rtc_time_complete;
765 ce_complete.token = &rtc_data;
766 rc = signal_ce_msg_simple(0x40, &ce_complete);
767 if (rc)
768 return rc;
769 wait_for_completion(&rtc_data.com);
770 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
771}
772
773struct boot_rtc_time_data {
774 int busy;
775 struct ce_msg_data ce_msg;
776 int rc;
777};
778
779static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
780{
781 struct boot_rtc_time_data *rtc = token;
782
783 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
784 rtc->rc = 0;
785 rtc->busy = 0;
786}
787
788int mf_get_boot_rtc(struct rtc_time *tm)
789{
790 struct ce_msg_comp_data ce_complete;
791 struct boot_rtc_time_data rtc_data;
792 int rc;
793
794 memset(&ce_complete, 0, sizeof(ce_complete));
795 memset(&rtc_data, 0, sizeof(rtc_data));
796 rtc_data.busy = 1;
797 ce_complete.handler = &get_boot_rtc_time_complete;
798 ce_complete.token = &rtc_data;
799 rc = signal_ce_msg_simple(0x40, &ce_complete);
800 if (rc)
801 return rc;
802 /* We need to poll here as we are not yet taking interrupts */
803 while (rtc_data.busy) {
804 if (hvlpevent_is_pending())
805 process_hvlpevents(NULL);
806 }
807 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
808}
809
810int mf_set_rtc(struct rtc_time *tm)
811{
812 char ce_time[12];
813 u8 day, mon, hour, min, sec, y1, y2;
814 unsigned year;
815
816 year = 1900 + tm->tm_year;
817 y1 = year / 100;
818 y2 = year % 100;
819
820 sec = tm->tm_sec;
821 min = tm->tm_min;
822 hour = tm->tm_hour;
823 day = tm->tm_mday;
824 mon = tm->tm_mon + 1;
825
826 BIN_TO_BCD(sec);
827 BIN_TO_BCD(min);
828 BIN_TO_BCD(hour);
829 BIN_TO_BCD(mon);
830 BIN_TO_BCD(day);
831 BIN_TO_BCD(y1);
832 BIN_TO_BCD(y2);
833
834 memset(ce_time, 0, sizeof(ce_time));
835 ce_time[3] = 0x41;
836 ce_time[4] = y1;
837 ce_time[5] = y2;
838 ce_time[6] = sec;
839 ce_time[7] = min;
840 ce_time[8] = hour;
841 ce_time[10] = day;
842 ce_time[11] = mon;
843
844 return signal_ce_msg(ce_time, NULL);
845}
846
847#ifdef CONFIG_PROC_FS
848
849static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
850 int count, int *eof, void *data)
851{
852 int len;
853 char *p;
854 struct vsp_cmd_data vsp_cmd;
855 int rc;
856 dma_addr_t dma_addr;
857
858 /* The HV appears to return no more than 256 bytes of command line */
859 if (off >= 256)
860 return 0;
861 if ((off + count) > 256)
862 count = 256 - off;
863
864 dma_addr = dma_map_single(iSeries_vio_dev, page, off + count,
865 DMA_FROM_DEVICE);
866 if (dma_mapping_error(dma_addr))
867 return -ENOMEM;
868 memset(page, 0, off + count);
869 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
870 vsp_cmd.cmd = 33;
871 vsp_cmd.sub_data.kern.token = dma_addr;
872 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
873 vsp_cmd.sub_data.kern.side = (u64)data;
874 vsp_cmd.sub_data.kern.length = off + count;
875 mb();
876 rc = signal_vsp_instruction(&vsp_cmd);
877 dma_unmap_single(iSeries_vio_dev, dma_addr, off + count,
878 DMA_FROM_DEVICE);
879 if (rc)
880 return rc;
881 if (vsp_cmd.result_code != 0)
882 return -ENOMEM;
883 p = page;
884 len = 0;
885 while (len < (off + count)) {
886 if ((*p == '\0') || (*p == '\n')) {
887 if (*p == '\0')
888 *p = '\n';
889 p++;
890 len++;
891 *eof = 1;
892 break;
893 }
894 p++;
895 len++;
896 }
897
898 if (len < off) {
899 *eof = 1;
900 len = 0;
901 }
902 return len;
903}
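
The clamp at the top of the handler is the standard read_proc contract: return 0 at end-of-file and never let off + count run past the 256-byte window the hypervisor fills. Pulled out as a standalone helper (a sketch, not kernel code):

#include <stdio.h>

static int clamp_window(long off, int count, int limit)
{
        if (off >= limit)
                return 0;               /* at or past EOF */
        if (off + count > limit)
                count = limit - off;
        return count;
}

int main(void)
{
        printf("%d\n", clamp_window(0, 4096, 256));  /* 256 */
        printf("%d\n", clamp_window(200, 100, 256)); /* 56 */
        printf("%d\n", clamp_window(300, 100, 256)); /* 0 */
        return 0;
}
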
904
905#if 0
906static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
907{
908 struct vsp_cmd_data vsp_cmd;
909 int rc;
910 int len = *size;
911 dma_addr_t dma_addr;
912
913 dma_addr = dma_map_single(iSeries_vio_dev, buffer, len,
914 DMA_FROM_DEVICE);
915 memset(buffer, 0, len);
916 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
917 vsp_cmd.cmd = 32;
918 vsp_cmd.sub_data.kern.token = dma_addr;
919 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
920 vsp_cmd.sub_data.kern.side = side;
921 vsp_cmd.sub_data.kern.offset = offset;
922 vsp_cmd.sub_data.kern.length = len;
923 mb();
924 rc = signal_vsp_instruction(&vsp_cmd);
925 if (rc == 0) {
926 if (vsp_cmd.result_code == 0)
927 *size = vsp_cmd.sub_data.length_out;
928 else
929 rc = -ENOMEM;
930 }
931
932 dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE);
933
934 return rc;
935}
936
937static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
938 int count, int *eof, void *data)
939{
940 int sizeToGet = count;
941
942 if (!capable(CAP_SYS_ADMIN))
943 return -EACCES;
944
945 if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
946 if (sizeToGet != 0) {
947 *start = page + off;
948 return sizeToGet;
949 }
950 *eof = 1;
951 return 0;
952 }
953 *eof = 1;
954 return 0;
955}
956#endif
957
958static int proc_mf_dump_side(char *page, char **start, off_t off,
959 int count, int *eof, void *data)
960{
961 int len;
962 char mf_current_side = ' ';
963 struct vsp_cmd_data vsp_cmd;
964
965 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
966 vsp_cmd.cmd = 2;
967 vsp_cmd.sub_data.ipl_type = 0;
968 mb();
969
970 if (signal_vsp_instruction(&vsp_cmd) == 0) {
971 if (vsp_cmd.result_code == 0) {
972 switch (vsp_cmd.sub_data.ipl_type) {
973 case 0: mf_current_side = 'A';
974 break;
975 case 1: mf_current_side = 'B';
976 break;
977 case 2: mf_current_side = 'C';
978 break;
979 default: mf_current_side = 'D';
980 break;
981 }
982 }
983 }
984
985 len = sprintf(page, "%c\n", mf_current_side);
986
987 if (len <= (off + count))
988 *eof = 1;
989 *start = page + off;
990 len -= off;
991 if (len > count)
992 len = count;
993 if (len < 0)
994 len = 0;
995 return len;
996}
997
998static int proc_mf_change_side(struct file *file, const char __user *buffer,
999 unsigned long count, void *data)
1000{
1001 char side;
1002 u64 newSide;
1003 struct vsp_cmd_data vsp_cmd;
1004
1005 if (!capable(CAP_SYS_ADMIN))
1006 return -EACCES;
1007
1008 if (count == 0)
1009 return 0;
1010
1011 if (get_user(side, buffer))
1012 return -EFAULT;
1013
1014 switch (side) {
1015 case 'A': newSide = 0;
1016 break;
1017 case 'B': newSide = 1;
1018 break;
1019 case 'C': newSide = 2;
1020 break;
1021 case 'D': newSide = 3;
1022 break;
1023 default:
1024 printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
1025 return -EINVAL;
1026 }
1027
1028 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1029 vsp_cmd.sub_data.ipl_type = newSide;
1030 vsp_cmd.cmd = 10;
1031
1032 (void)signal_vsp_instruction(&vsp_cmd);
1033
1034 return count;
1035}
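
The side read/write handlers share one mapping: IPL types 0-3 correspond to sides 'A'-'D', and the dump handler shows 'D' for any unknown type. The same mapping as a pair of helpers (a sketch, not how the kernel factors it):

#include <stdio.h>

static long side_to_ipl(char side)
{
        return (side >= 'A' && side <= 'D') ? side - 'A' : -1;
}

static char ipl_to_side(unsigned long ipl)
{
        return ipl <= 2 ? 'A' + (char)ipl : 'D'; /* unknown types show as 'D' */
}

int main(void)
{
        printf("%ld %c\n", side_to_ipl('C'), ipl_to_side(1)); /* prints 2 B */
        return 0;
}
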
1036
1037#if 0
1038static void mf_getSrcHistory(char *buffer, int size)
1039{
1040 struct IplTypeReturnStuff return_stuff;
1041 struct pending_event *ev = new_pending_event();
1042 int rc = 0;
1043 char *pages[4];
1044
1045 pages[0] = kmalloc(4096, GFP_ATOMIC);
1046 pages[1] = kmalloc(4096, GFP_ATOMIC);
1047 pages[2] = kmalloc(4096, GFP_ATOMIC);
1048 pages[3] = kmalloc(4096, GFP_ATOMIC);
1049 if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
1050 || (pages[2] == NULL) || (pages[3] == NULL))
1051 goto out_free; /* was "return -ENOMEM": this function returns void */
1052
1053 return_stuff.xType = 0;
1054 return_stuff.xRc = 0;
1055 return_stuff.xDone = 0;
1056 ev->event.hp_lp_event.xSubtype = 6;
1057 ev->event.hp_lp_event.x.xSubtypeData =
1058 subtype_data('M', 'F', 'V', 'I');
1059 ev->event.data.vsp_cmd.xEvent = &return_stuff;
1060 ev->event.data.vsp_cmd.cmd = 4;
1061 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062 ev->event.data.vsp_cmd.result_code = 0xFF;
1063 ev->event.data.vsp_cmd.reserved = 0;
1064 ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]);
1065 ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]);
1066 ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]);
1067 ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]);
1068 mb();
1069 if (signal_event(ev) != 0)
1070 goto out_free;
1071
1072 while (return_stuff.xDone != 1)
1073 udelay(10);
1074 if (return_stuff.xRc == 0)
1075 memcpy(buffer, pages[0], size);
1076out_free:
	kfree(pages[0]);
1077 kfree(pages[1]);
1078 kfree(pages[2]);
1079 kfree(pages[3]);
1080}
1081#endif
1082
1083static int proc_mf_dump_src(char *page, char **start, off_t off,
1084 int count, int *eof, void *data)
1085{
1086#if 0
1087 int len;
1088
1089 mf_getSrcHistory(page, count);
1090 len = count;
1091 len -= off;
1092 if (len < count) {
1093 *eof = 1;
1094 if (len <= 0)
1095 return 0;
1096 } else
1097 len = count;
1098 *start = page + off;
1099 return len;
1100#else
1101 return 0;
1102#endif
1103}
1104
1105static int proc_mf_change_src(struct file *file, const char __user *buffer,
1106 unsigned long count, void *data)
1107{
1108 char stkbuf[10];
1109
1110 if (!capable(CAP_SYS_ADMIN))
1111 return -EACCES;
1112
1113 if ((count < 4) && (count != 1)) {
1114 printk(KERN_ERR "mf_proc: invalid src\n");
1115 return -EINVAL;
1116 }
1117
1118 if (count > (sizeof(stkbuf) - 1))
1119 count = sizeof(stkbuf) - 1;
1120 if (copy_from_user(stkbuf, buffer, count))
1121 return -EFAULT;
1122
1123 if ((count == 1) && (*stkbuf == '\0'))
1124 mf_clear_src();
1125 else
1126 mf_display_src(*(u32 *)stkbuf);
1127
1128 return count;
1129}
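
So the write protocol for this file is: a single NUL byte clears the operator-panel SRC, and anything of four or more bytes has its first four bytes displayed as a code. A userspace-style sketch of the same parse (clear_src()/display_src() are stand-ins for the real MF calls):

#include <stdio.h>
#include <string.h>

static void clear_src(void)         { printf("SRC cleared\n"); }
static void display_src(unsigned x) { printf("SRC 0x%08x\n", x); }

static long handle_write(const char *buf, unsigned long count)
{
        char stkbuf[10];
        unsigned code;

        memset(stkbuf, 0, sizeof(stkbuf));      /* zeroed here for determinism */
        if (count < 4 && count != 1)
                return -1;                      /* -EINVAL in the kernel */
        if (count > sizeof(stkbuf) - 1)
                count = sizeof(stkbuf) - 1;
        memcpy(stkbuf, buf, count);

        if (count == 1 && stkbuf[0] == '\0') {
                clear_src();
        } else {
                memcpy(&code, stkbuf, sizeof(code)); /* same raw view as *(u32 *)stkbuf */
                display_src(code);
        }
        return (long)count;
}

int main(void)
{
        handle_write("\0", 1);   /* clears the SRC */
        handle_write("E302", 4); /* displays the four bytes */
        return 0;
}
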
1130
1131static int proc_mf_change_cmdline(struct file *file, const char __user *buffer,
1132 unsigned long count, void *data)
1133{
1134 struct vsp_cmd_data vsp_cmd;
1135 dma_addr_t dma_addr;
1136 char *page;
1137 int ret = -EACCES;
1138
1139 if (!capable(CAP_SYS_ADMIN))
1140 goto out;
1141
1142 dma_addr = 0;
1143 page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1144 GFP_ATOMIC);
1145 ret = -ENOMEM;
1146 if (page == NULL)
1147 goto out;
1148
1149 ret = -EFAULT;
1150 if (copy_from_user(page, buffer, count))
1151 goto out_free;
1152
1153 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1154 vsp_cmd.cmd = 31;
1155 vsp_cmd.sub_data.kern.token = dma_addr;
1156 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1157 vsp_cmd.sub_data.kern.side = (u64)data;
1158 vsp_cmd.sub_data.kern.length = count;
1159 mb();
1160 (void)signal_vsp_instruction(&vsp_cmd);
1161 ret = count;
1162
1163out_free:
1164 dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1165out:
1166 return ret;
1167}
1168
1169static ssize_t proc_mf_change_vmlinux(struct file *file,
1170 const char __user *buf,
1171 size_t count, loff_t *ppos)
1172{
1173 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
1174 ssize_t rc;
1175 dma_addr_t dma_addr;
1176 char *page;
1177 struct vsp_cmd_data vsp_cmd;
1178
1179 rc = -EACCES;
1180 if (!capable(CAP_SYS_ADMIN))
1181 goto out;
1182
1183 dma_addr = 0;
1184 page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1185 GFP_ATOMIC);
1186 rc = -ENOMEM;
1187 if (page == NULL) {
1188 printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
1189 goto out;
1190 }
1191 rc = -EFAULT;
1192 if (copy_from_user(page, buf, count))
1193 goto out_free;
1194
1195 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1196 vsp_cmd.cmd = 30;
1197 vsp_cmd.sub_data.kern.token = dma_addr;
1198 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1199 vsp_cmd.sub_data.kern.side = (u64)dp->data;
1200 vsp_cmd.sub_data.kern.offset = *ppos;
1201 vsp_cmd.sub_data.kern.length = count;
1202 mb();
1203 rc = signal_vsp_instruction(&vsp_cmd);
1204 if (rc)
1205 goto out_free;
1206 rc = -ENOMEM;
1207 if (vsp_cmd.result_code != 0)
1208 goto out_free;
1209
1210 *ppos += count;
1211 rc = count;
1212out_free:
1213 dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1214out:
1215 return rc;
1216}
1217
1218static struct file_operations proc_vmlinux_operations = {
1219 .write = proc_mf_change_vmlinux,
1220};
1221
1222static int __init mf_proc_init(void)
1223{
1224 struct proc_dir_entry *mf_proc_root;
1225 struct proc_dir_entry *ent;
1226 struct proc_dir_entry *mf;
1227 char name[2];
1228 int i;
1229
1230 mf_proc_root = proc_mkdir("iSeries/mf", NULL);
1231 if (!mf_proc_root)
1232 return 1;
1233
1234 name[1] = '\0';
1235 for (i = 0; i < 4; i++) {
1236 name[0] = 'A' + i;
1237 mf = proc_mkdir(name, mf_proc_root);
1238 if (!mf)
1239 return 1;
1240
1241 ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf);
1242 if (!ent)
1243 return 1;
1244 ent->nlink = 1;
1245 ent->data = (void *)(long)i;
1246 ent->read_proc = proc_mf_dump_cmdline;
1247 ent->write_proc = proc_mf_change_cmdline;
1248
1249 if (i == 3) /* no vmlinux entry for 'D' */
1250 continue;
1251
1252 ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf);
1253 if (!ent)
1254 return 1;
1255 ent->nlink = 1;
1256 ent->data = (void *)(long)i;
1257 ent->proc_fops = &proc_vmlinux_operations;
1258 }
1259
1260 ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1261 if (!ent)
1262 return 1;
1263 ent->nlink = 1;
1264 ent->data = (void *)0;
1265 ent->read_proc = proc_mf_dump_side;
1266 ent->write_proc = proc_mf_change_side;
1267
1268 ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1269 if (!ent)
1270 return 1;
1271 ent->nlink = 1;
1272 ent->data = (void *)0;
1273 ent->read_proc = proc_mf_dump_src;
1274 ent->write_proc = proc_mf_change_src;
1275
1276 return 0;
1277}
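
For reference, the tree this initcall builds (sides A-D from the loop, with no vmlinux node for side D, per the check above):

    /proc/iSeries/mf/
        A/  cmdline  vmlinux
        B/  cmdline  vmlinux
        C/  cmdline  vmlinux
        D/  cmdline
        side
        src
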
1278
1279__initcall(mf_proc_init);
1280
1281#endif /* CONFIG_PROC_FS */
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index e7241ad80a08..077507ffbab8 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -28,6 +28,7 @@
28#include <asm/ppc_asm.h> 28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
31#include <asm/thread_info.h>
31 32
32 .text 33 .text
33 34
@@ -64,44 +65,6 @@ _GLOBAL(get_srr1)
64_GLOBAL(get_sp) 65_GLOBAL(get_sp)
65 mr r3,r1 66 mr r3,r1
66 blr 67 blr
67
68#ifdef CONFIG_PPC_ISERIES
69/* unsigned long local_save_flags(void) */
70_GLOBAL(local_get_flags)
71 lbz r3,PACAPROCENABLED(r13)
72 blr
73
74/* unsigned long local_irq_disable(void) */
75_GLOBAL(local_irq_disable)
76 lbz r3,PACAPROCENABLED(r13)
77 li r4,0
78 stb r4,PACAPROCENABLED(r13)
79 blr /* Done */
80
81/* void local_irq_restore(unsigned long flags) */
82_GLOBAL(local_irq_restore)
83 lbz r5,PACAPROCENABLED(r13)
84 /* Check if things are setup the way we want _already_. */
85 cmpw 0,r3,r5
86 beqlr
87 /* are we enabling interrupts? */
88 cmpdi 0,r3,0
89 stb r3,PACAPROCENABLED(r13)
90 beqlr
91 /* Check pending interrupts */
92 /* A decrementer, IPI or PMC interrupt may have occurred
93 * while we were in the hypervisor (which enables) */
94 ld r4,PACALPPACA+LPPACAANYINT(r13)
95 cmpdi r4,0
96 beqlr
97
98 /*
99 * Handle pending interrupts in interrupt context
100 */
101 li r0,0x5555
102 sc
103 blr
104#endif /* CONFIG_PPC_ISERIES */
105 68
106#ifdef CONFIG_IRQSTACKS 69#ifdef CONFIG_IRQSTACKS
107_GLOBAL(call_do_softirq) 70_GLOBAL(call_do_softirq)
@@ -329,7 +292,7 @@ _GLOBAL(__flush_dcache_icache)
329 292
330/* Flush the dcache */ 293/* Flush the dcache */
331 ld r7,PPC64_CACHES@toc(r2) 294 ld r7,PPC64_CACHES@toc(r2)
332 clrrdi r3,r3,12 /* Page align */ 295 clrrdi r3,r3,PAGE_SHIFT /* Page align */
333 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ 296 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
334 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ 297 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
335 mr r6,r3 298 mr r6,r3
@@ -488,25 +451,6 @@ _GLOBAL(_outsl_ns)
488 sync 451 sync
489 blr 452 blr
490 453
491
492_GLOBAL(cvt_fd)
493 lfd 0,0(r5) /* load up fpscr value */
494 mtfsf 0xff,0
495 lfs 0,0(r3)
496 stfd 0,0(r4)
497 mffs 0 /* save new fpscr value */
498 stfd 0,0(r5)
499 blr
500
501_GLOBAL(cvt_df)
502 lfd 0,0(r5) /* load up fpscr value */
503 mtfsf 0xff,0
504 lfd 0,0(r3)
505 stfs 0,0(r4)
506 mffs 0 /* save new fpscr value */
507 stfd 0,0(r5)
508 blr
509
510/* 454/*
511 * identify_cpu and calls setup_cpu 455 * identify_cpu and calls setup_cpu
512 * In: r3 = base of the cpu_specs array 456 * In: r3 = base of the cpu_specs array
@@ -692,38 +636,6 @@ _GLOBAL(disable_kernel_fp)
692 isync 636 isync
693 blr 637 blr
694 638
695/*
696 * giveup_fpu(tsk)
697 * Disable FP for the task given as the argument,
698 * and save the floating-point registers in its thread_struct.
699 * Enables the FPU for use in the kernel on return.
700 */
701_GLOBAL(giveup_fpu)
702 mfmsr r5
703 ori r5,r5,MSR_FP
704 mtmsrd r5 /* enable use of fpu now */
705 isync
706 cmpdi 0,r3,0
707 beqlr- /* if no previous owner, done */
708 addi r3,r3,THREAD /* want THREAD of task */
709 ld r5,PT_REGS(r3)
710 cmpdi 0,r5,0
711 SAVE_32FPRS(0, r3)
712 mffs fr0
713 stfd fr0,THREAD_FPSCR(r3)
714 beq 1f
715 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
716 li r3,MSR_FP|MSR_FE0|MSR_FE1
717 andc r4,r4,r3 /* disable FP for previous task */
718 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7191:
720#ifndef CONFIG_SMP
721 li r5,0
722 ld r4,last_task_used_math@got(r2)
723 std r5,0(r4)
724#endif /* CONFIG_SMP */
725 blr
726
727#ifdef CONFIG_ALTIVEC 639#ifdef CONFIG_ALTIVEC
728 640
729#if 0 /* this has no callers for now */ 641#if 0 /* this has no callers for now */
@@ -778,6 +690,13 @@ _GLOBAL(giveup_altivec)
778_GLOBAL(__setup_cpu_power3) 690_GLOBAL(__setup_cpu_power3)
779 blr 691 blr
780 692
693_GLOBAL(execve)
694 li r0,__NR_execve
695 sc
696 bnslr
697 neg r3,r3
698 blr
699
781/* kexec_wait(phys_cpu) 700/* kexec_wait(phys_cpu)
782 * 701 *
783 * wait for the flag to change, indicating this kernel is going away but 702 * wait for the flag to change, indicating this kernel is going away but
@@ -948,566 +867,3 @@ _GLOBAL(kexec_sequence)
948 li r5,0 867 li r5,0
949 blr /* image->start(physid, image->start, 0); */ 868 blr /* image->start(physid, image->start, 0); */
950#endif /* CONFIG_KEXEC */ 869#endif /* CONFIG_KEXEC */
951
952/* Why isn't this a) automatic, b) written in 'C'? */
953 .balign 8
954_GLOBAL(sys_call_table32)
955 .llong .sys_restart_syscall /* 0 */
956 .llong .sys_exit
957 .llong .ppc_fork
958 .llong .sys_read
959 .llong .sys_write
960 .llong .compat_sys_open /* 5 */
961 .llong .sys_close
962 .llong .sys32_waitpid
963 .llong .sys32_creat
964 .llong .sys_link
965 .llong .sys_unlink /* 10 */
966 .llong .sys32_execve
967 .llong .sys_chdir
968 .llong .compat_sys_time
969 .llong .sys_mknod
970 .llong .sys_chmod /* 15 */
971 .llong .sys_lchown
972 .llong .sys_ni_syscall /* old break syscall */
973 .llong .sys_ni_syscall /* old stat syscall */
974 .llong .ppc32_lseek
975 .llong .sys_getpid /* 20 */
976 .llong .compat_sys_mount
977 .llong .sys_oldumount
978 .llong .sys_setuid
979 .llong .sys_getuid
980 .llong .compat_sys_stime /* 25 */
981 .llong .sys32_ptrace
982 .llong .sys_alarm
983 .llong .sys_ni_syscall /* old fstat syscall */
984 .llong .sys32_pause
985 .llong .compat_sys_utime /* 30 */
986 .llong .sys_ni_syscall /* old stty syscall */
987 .llong .sys_ni_syscall /* old gtty syscall */
988 .llong .sys32_access
989 .llong .sys32_nice
990 .llong .sys_ni_syscall /* 35 - old ftime syscall */
991 .llong .sys_sync
992 .llong .sys32_kill
993 .llong .sys_rename
994 .llong .sys32_mkdir
995 .llong .sys_rmdir /* 40 */
996 .llong .sys_dup
997 .llong .sys_pipe
998 .llong .compat_sys_times
999 .llong .sys_ni_syscall /* old prof syscall */
1000 .llong .sys_brk /* 45 */
1001 .llong .sys_setgid
1002 .llong .sys_getgid
1003 .llong .sys_signal
1004 .llong .sys_geteuid
1005 .llong .sys_getegid /* 50 */
1006 .llong .sys_acct
1007 .llong .sys_umount
1008 .llong .sys_ni_syscall /* old lock syscall */
1009 .llong .compat_sys_ioctl
1010 .llong .compat_sys_fcntl /* 55 */
1011 .llong .sys_ni_syscall /* old mpx syscall */
1012 .llong .sys32_setpgid
1013 .llong .sys_ni_syscall /* old ulimit syscall */
1014 .llong .sys32_olduname
1015 .llong .sys32_umask /* 60 */
1016 .llong .sys_chroot
1017 .llong .sys_ustat
1018 .llong .sys_dup2
1019 .llong .sys_getppid
1020 .llong .sys_getpgrp /* 65 */
1021 .llong .sys_setsid
1022 .llong .sys32_sigaction
1023 .llong .sys_sgetmask
1024 .llong .sys32_ssetmask
1025 .llong .sys_setreuid /* 70 */
1026 .llong .sys_setregid
1027 .llong .ppc32_sigsuspend
1028 .llong .compat_sys_sigpending
1029 .llong .sys32_sethostname
1030 .llong .compat_sys_setrlimit /* 75 */
1031 .llong .compat_sys_old_getrlimit
1032 .llong .compat_sys_getrusage
1033 .llong .sys32_gettimeofday
1034 .llong .sys32_settimeofday
1035 .llong .sys32_getgroups /* 80 */
1036 .llong .sys32_setgroups
1037 .llong .sys_ni_syscall /* old select syscall */
1038 .llong .sys_symlink
1039 .llong .sys_ni_syscall /* old lstat syscall */
1040 .llong .sys32_readlink /* 85 */
1041 .llong .sys_uselib
1042 .llong .sys_swapon
1043 .llong .sys_reboot
1044 .llong .old32_readdir
1045 .llong .sys_mmap /* 90 */
1046 .llong .sys_munmap
1047 .llong .sys_truncate
1048 .llong .sys_ftruncate
1049 .llong .sys_fchmod
1050 .llong .sys_fchown /* 95 */
1051 .llong .sys32_getpriority
1052 .llong .sys32_setpriority
1053 .llong .sys_ni_syscall /* old profil syscall */
1054 .llong .compat_sys_statfs
1055 .llong .compat_sys_fstatfs /* 100 */
1056 .llong .sys_ni_syscall /* old ioperm syscall */
1057 .llong .compat_sys_socketcall
1058 .llong .sys32_syslog
1059 .llong .compat_sys_setitimer
1060 .llong .compat_sys_getitimer /* 105 */
1061 .llong .compat_sys_newstat
1062 .llong .compat_sys_newlstat
1063 .llong .compat_sys_newfstat
1064 .llong .sys32_uname
1065 .llong .sys_ni_syscall /* 110 old iopl syscall */
1066 .llong .sys_vhangup
1067 .llong .sys_ni_syscall /* old idle syscall */
1068 .llong .sys_ni_syscall /* old vm86 syscall */
1069 .llong .compat_sys_wait4
1070 .llong .sys_swapoff /* 115 */
1071 .llong .sys32_sysinfo
1072 .llong .sys32_ipc
1073 .llong .sys_fsync
1074 .llong .ppc32_sigreturn
1075 .llong .ppc_clone /* 120 */
1076 .llong .sys32_setdomainname
1077 .llong .ppc64_newuname
1078 .llong .sys_ni_syscall /* old modify_ldt syscall */
1079 .llong .sys32_adjtimex
1080 .llong .sys_mprotect /* 125 */
1081 .llong .compat_sys_sigprocmask
1082 .llong .sys_ni_syscall /* old create_module syscall */
1083 .llong .sys_init_module
1084 .llong .sys_delete_module
1085 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1086 .llong .sys_quotactl
1087 .llong .sys32_getpgid
1088 .llong .sys_fchdir
1089 .llong .sys_bdflush
1090 .llong .sys32_sysfs /* 135 */
1091 .llong .ppc64_personality
1092 .llong .sys_ni_syscall /* for afs_syscall */
1093 .llong .sys_setfsuid
1094 .llong .sys_setfsgid
1095 .llong .sys_llseek /* 140 */
1096 .llong .sys32_getdents
1097 .llong .ppc32_select
1098 .llong .sys_flock
1099 .llong .sys_msync
1100 .llong .compat_sys_readv /* 145 */
1101 .llong .compat_sys_writev
1102 .llong .sys32_getsid
1103 .llong .sys_fdatasync
1104 .llong .sys32_sysctl
1105 .llong .sys_mlock /* 150 */
1106 .llong .sys_munlock
1107 .llong .sys_mlockall
1108 .llong .sys_munlockall
1109 .llong .sys32_sched_setparam
1110 .llong .sys32_sched_getparam /* 155 */
1111 .llong .sys32_sched_setscheduler
1112 .llong .sys32_sched_getscheduler
1113 .llong .sys_sched_yield
1114 .llong .sys32_sched_get_priority_max
1115 .llong .sys32_sched_get_priority_min /* 160 */
1116 .llong .sys32_sched_rr_get_interval
1117 .llong .compat_sys_nanosleep
1118 .llong .sys_mremap
1119 .llong .sys_setresuid
1120 .llong .sys_getresuid /* 165 */
1121 .llong .sys_ni_syscall /* old query_module syscall */
1122 .llong .sys_poll
1123 .llong .compat_sys_nfsservctl
1124 .llong .sys_setresgid
1125 .llong .sys_getresgid /* 170 */
1126 .llong .sys32_prctl
1127 .llong .ppc32_rt_sigreturn
1128 .llong .sys32_rt_sigaction
1129 .llong .sys32_rt_sigprocmask
1130 .llong .sys32_rt_sigpending /* 175 */
1131 .llong .compat_sys_rt_sigtimedwait
1132 .llong .sys32_rt_sigqueueinfo
1133 .llong .ppc32_rt_sigsuspend
1134 .llong .sys32_pread64
1135 .llong .sys32_pwrite64 /* 180 */
1136 .llong .sys_chown
1137 .llong .sys_getcwd
1138 .llong .sys_capget
1139 .llong .sys_capset
1140 .llong .sys32_sigaltstack /* 185 */
1141 .llong .sys32_sendfile
1142 .llong .sys_ni_syscall /* reserved for streams1 */
1143 .llong .sys_ni_syscall /* reserved for streams2 */
1144 .llong .ppc_vfork
1145 .llong .compat_sys_getrlimit /* 190 */
1146 .llong .sys32_readahead
1147 .llong .sys32_mmap2
1148 .llong .sys32_truncate64
1149 .llong .sys32_ftruncate64
1150 .llong .sys_stat64 /* 195 */
1151 .llong .sys_lstat64
1152 .llong .sys_fstat64
1153 .llong .sys32_pciconfig_read
1154 .llong .sys32_pciconfig_write
1155 .llong .sys32_pciconfig_iobase /* 200 - pciconfig_iobase */
1156 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1157 .llong .sys_getdents64
1158 .llong .sys_pivot_root
1159 .llong .compat_sys_fcntl64
1160 .llong .sys_madvise /* 205 */
1161 .llong .sys_mincore
1162 .llong .sys_gettid
1163 .llong .sys_tkill
1164 .llong .sys_setxattr
1165 .llong .sys_lsetxattr /* 210 */
1166 .llong .sys_fsetxattr
1167 .llong .sys_getxattr
1168 .llong .sys_lgetxattr
1169 .llong .sys_fgetxattr
1170 .llong .sys_listxattr /* 215 */
1171 .llong .sys_llistxattr
1172 .llong .sys_flistxattr
1173 .llong .sys_removexattr
1174 .llong .sys_lremovexattr
1175 .llong .sys_fremovexattr /* 220 */
1176 .llong .compat_sys_futex
1177 .llong .compat_sys_sched_setaffinity
1178 .llong .compat_sys_sched_getaffinity
1179 .llong .sys_ni_syscall
1180 .llong .sys_ni_syscall /* 225 - reserved for tux */
1181 .llong .sys32_sendfile64
1182 .llong .compat_sys_io_setup
1183 .llong .sys_io_destroy
1184 .llong .compat_sys_io_getevents
1185 .llong .compat_sys_io_submit
1186 .llong .sys_io_cancel
1187 .llong .sys_set_tid_address
1188 .llong .ppc32_fadvise64
1189 .llong .sys_exit_group
1190 .llong .ppc32_lookup_dcookie /* 235 */
1191 .llong .sys_epoll_create
1192 .llong .sys_epoll_ctl
1193 .llong .sys_epoll_wait
1194 .llong .sys_remap_file_pages
1195 .llong .ppc32_timer_create /* 240 */
1196 .llong .compat_sys_timer_settime
1197 .llong .compat_sys_timer_gettime
1198 .llong .sys_timer_getoverrun
1199 .llong .sys_timer_delete
1200 .llong .compat_sys_clock_settime /* 245 */
1201 .llong .compat_sys_clock_gettime
1202 .llong .compat_sys_clock_getres
1203 .llong .compat_sys_clock_nanosleep
1204 .llong .ppc32_swapcontext
1205 .llong .sys32_tgkill /* 250 */
1206 .llong .sys32_utimes
1207 .llong .compat_sys_statfs64
1208 .llong .compat_sys_fstatfs64
1209 .llong .ppc32_fadvise64_64 /* 32bit only fadvise64_64 */
1210 .llong .ppc_rtas /* 255 */
1211 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1212 .llong .sys_ni_syscall /* 257 reserved for vserver */
1213 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1214 .llong .compat_sys_mbind
1215 .llong .compat_sys_get_mempolicy /* 260 */
1216 .llong .compat_sys_set_mempolicy
1217 .llong .compat_sys_mq_open
1218 .llong .sys_mq_unlink
1219 .llong .compat_sys_mq_timedsend
1220 .llong .compat_sys_mq_timedreceive /* 265 */
1221 .llong .compat_sys_mq_notify
1222 .llong .compat_sys_mq_getsetattr
1223 .llong .compat_sys_kexec_load
1224 .llong .sys32_add_key
1225 .llong .sys32_request_key /* 270 */
1226 .llong .compat_sys_keyctl
1227 .llong .compat_sys_waitid
1228 .llong .sys32_ioprio_set
1229 .llong .sys32_ioprio_get
1230 .llong .sys_inotify_init /* 275 */
1231 .llong .sys_inotify_add_watch
1232 .llong .sys_inotify_rm_watch
1233
1234 .balign 8
1235_GLOBAL(sys_call_table)
1236 .llong .sys_restart_syscall /* 0 */
1237 .llong .sys_exit
1238 .llong .ppc_fork
1239 .llong .sys_read
1240 .llong .sys_write
1241 .llong .sys_open /* 5 */
1242 .llong .sys_close
1243 .llong .sys_waitpid
1244 .llong .sys_creat
1245 .llong .sys_link
1246 .llong .sys_unlink /* 10 */
1247 .llong .sys_execve
1248 .llong .sys_chdir
1249 .llong .sys64_time
1250 .llong .sys_mknod
1251 .llong .sys_chmod /* 15 */
1252 .llong .sys_lchown
1253 .llong .sys_ni_syscall /* old break syscall */
1254 .llong .sys_ni_syscall /* old stat syscall */
1255 .llong .sys_lseek
1256 .llong .sys_getpid /* 20 */
1257 .llong .sys_mount
1258 .llong .sys_ni_syscall /* old umount syscall */
1259 .llong .sys_setuid
1260 .llong .sys_getuid
1261 .llong .sys_stime /* 25 */
1262 .llong .sys_ptrace
1263 .llong .sys_alarm
1264 .llong .sys_ni_syscall /* old fstat syscall */
1265 .llong .sys_pause
1266 .llong .sys_utime /* 30 */
1267 .llong .sys_ni_syscall /* old stty syscall */
1268 .llong .sys_ni_syscall /* old gtty syscall */
1269 .llong .sys_access
1270 .llong .sys_nice
1271 .llong .sys_ni_syscall /* 35 - old ftime syscall */
1272 .llong .sys_sync
1273 .llong .sys_kill
1274 .llong .sys_rename
1275 .llong .sys_mkdir
1276 .llong .sys_rmdir /* 40 */
1277 .llong .sys_dup
1278 .llong .sys_pipe
1279 .llong .sys_times
1280 .llong .sys_ni_syscall /* old prof syscall */
1281 .llong .sys_brk /* 45 */
1282 .llong .sys_setgid
1283 .llong .sys_getgid
1284 .llong .sys_signal
1285 .llong .sys_geteuid
1286 .llong .sys_getegid /* 50 */
1287 .llong .sys_acct
1288 .llong .sys_umount
1289 .llong .sys_ni_syscall /* old lock syscall */
1290 .llong .sys_ioctl
1291 .llong .sys_fcntl /* 55 */
1292 .llong .sys_ni_syscall /* old mpx syscall */
1293 .llong .sys_setpgid
1294 .llong .sys_ni_syscall /* old ulimit syscall */
1295 .llong .sys_ni_syscall /* old uname syscall */
1296 .llong .sys_umask /* 60 */
1297 .llong .sys_chroot
1298 .llong .sys_ustat
1299 .llong .sys_dup2
1300 .llong .sys_getppid
1301 .llong .sys_getpgrp /* 65 */
1302 .llong .sys_setsid
1303 .llong .sys_ni_syscall
1304 .llong .sys_sgetmask
1305 .llong .sys_ssetmask
1306 .llong .sys_setreuid /* 70 */
1307 .llong .sys_setregid
1308 .llong .sys_ni_syscall
1309 .llong .sys_ni_syscall
1310 .llong .sys_sethostname
1311 .llong .sys_setrlimit /* 75 */
1312 .llong .sys_ni_syscall /* old getrlimit syscall */
1313 .llong .sys_getrusage
1314 .llong .sys_gettimeofday
1315 .llong .sys_settimeofday
1316 .llong .sys_getgroups /* 80 */
1317 .llong .sys_setgroups
1318 .llong .sys_ni_syscall /* old select syscall */
1319 .llong .sys_symlink
1320 .llong .sys_ni_syscall /* old lstat syscall */
1321 .llong .sys_readlink /* 85 */
1322 .llong .sys_uselib
1323 .llong .sys_swapon
1324 .llong .sys_reboot
1325 .llong .sys_ni_syscall /* old readdir syscall */
1326 .llong .sys_mmap /* 90 */
1327 .llong .sys_munmap
1328 .llong .sys_truncate
1329 .llong .sys_ftruncate
1330 .llong .sys_fchmod
1331 .llong .sys_fchown /* 95 */
1332 .llong .sys_getpriority
1333 .llong .sys_setpriority
1334 .llong .sys_ni_syscall /* old profil syscall holder */
1335 .llong .sys_statfs
1336 .llong .sys_fstatfs /* 100 */
1337 .llong .sys_ni_syscall /* old ioperm syscall */
1338 .llong .sys_socketcall
1339 .llong .sys_syslog
1340 .llong .sys_setitimer
1341 .llong .sys_getitimer /* 105 */
1342 .llong .sys_newstat
1343 .llong .sys_newlstat
1344 .llong .sys_newfstat
1345 .llong .sys_ni_syscall /* old uname syscall */
1346 .llong .sys_ni_syscall /* 110 old iopl syscall */
1347 .llong .sys_vhangup
1348 .llong .sys_ni_syscall /* old idle syscall */
1349 .llong .sys_ni_syscall /* old vm86 syscall */
1350 .llong .sys_wait4
1351 .llong .sys_swapoff /* 115 */
1352 .llong .sys_sysinfo
1353 .llong .sys_ipc
1354 .llong .sys_fsync
1355 .llong .sys_ni_syscall
1356 .llong .ppc_clone /* 120 */
1357 .llong .sys_setdomainname
1358 .llong .ppc64_newuname
1359 .llong .sys_ni_syscall /* old modify_ldt syscall */
1360 .llong .sys_adjtimex
1361 .llong .sys_mprotect /* 125 */
1362 .llong .sys_ni_syscall
1363 .llong .sys_ni_syscall /* old create_module syscall */
1364 .llong .sys_init_module
1365 .llong .sys_delete_module
1366 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1367 .llong .sys_quotactl
1368 .llong .sys_getpgid
1369 .llong .sys_fchdir
1370 .llong .sys_bdflush
1371 .llong .sys_sysfs /* 135 */
1372 .llong .ppc64_personality
1373 .llong .sys_ni_syscall /* for afs_syscall */
1374 .llong .sys_setfsuid
1375 .llong .sys_setfsgid
1376 .llong .sys_llseek /* 140 */
1377 .llong .sys_getdents
1378 .llong .sys_select
1379 .llong .sys_flock
1380 .llong .sys_msync
1381 .llong .sys_readv /* 145 */
1382 .llong .sys_writev
1383 .llong .sys_getsid
1384 .llong .sys_fdatasync
1385 .llong .sys_sysctl
1386 .llong .sys_mlock /* 150 */
1387 .llong .sys_munlock
1388 .llong .sys_mlockall
1389 .llong .sys_munlockall
1390 .llong .sys_sched_setparam
1391 .llong .sys_sched_getparam /* 155 */
1392 .llong .sys_sched_setscheduler
1393 .llong .sys_sched_getscheduler
1394 .llong .sys_sched_yield
1395 .llong .sys_sched_get_priority_max
1396 .llong .sys_sched_get_priority_min /* 160 */
1397 .llong .sys_sched_rr_get_interval
1398 .llong .sys_nanosleep
1399 .llong .sys_mremap
1400 .llong .sys_setresuid
1401 .llong .sys_getresuid /* 165 */
1402 .llong .sys_ni_syscall /* old query_module syscall */
1403 .llong .sys_poll
1404 .llong .sys_nfsservctl
1405 .llong .sys_setresgid
1406 .llong .sys_getresgid /* 170 */
1407 .llong .sys_prctl
1408 .llong .ppc64_rt_sigreturn
1409 .llong .sys_rt_sigaction
1410 .llong .sys_rt_sigprocmask
1411 .llong .sys_rt_sigpending /* 175 */
1412 .llong .sys_rt_sigtimedwait
1413 .llong .sys_rt_sigqueueinfo
1414 .llong .ppc64_rt_sigsuspend
1415 .llong .sys_pread64
1416 .llong .sys_pwrite64 /* 180 */
1417 .llong .sys_chown
1418 .llong .sys_getcwd
1419 .llong .sys_capget
1420 .llong .sys_capset
1421 .llong .sys_sigaltstack /* 185 */
1422 .llong .sys_sendfile64
1423 .llong .sys_ni_syscall /* reserved for streams1 */
1424 .llong .sys_ni_syscall /* reserved for streams2 */
1425 .llong .ppc_vfork
1426 .llong .sys_getrlimit /* 190 */
1427 .llong .sys_readahead
1428 .llong .sys_ni_syscall /* 32bit only mmap2 */
1429 .llong .sys_ni_syscall /* 32bit only truncate64 */
1430 .llong .sys_ni_syscall /* 32bit only ftruncate64 */
1431 .llong .sys_ni_syscall /* 195 - 32bit only stat64 */
1432 .llong .sys_ni_syscall /* 32bit only lstat64 */
1433 .llong .sys_ni_syscall /* 32bit only fstat64 */
1434 .llong .sys_pciconfig_read
1435 .llong .sys_pciconfig_write
1436 .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */
1437 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1438 .llong .sys_getdents64
1439 .llong .sys_pivot_root
1440 .llong .sys_ni_syscall /* 32bit only fcntl64 */
1441 .llong .sys_madvise /* 205 */
1442 .llong .sys_mincore
1443 .llong .sys_gettid
1444 .llong .sys_tkill
1445 .llong .sys_setxattr
1446 .llong .sys_lsetxattr /* 210 */
1447 .llong .sys_fsetxattr
1448 .llong .sys_getxattr
1449 .llong .sys_lgetxattr
1450 .llong .sys_fgetxattr
1451 .llong .sys_listxattr /* 215 */
1452 .llong .sys_llistxattr
1453 .llong .sys_flistxattr
1454 .llong .sys_removexattr
1455 .llong .sys_lremovexattr
1456 .llong .sys_fremovexattr /* 220 */
1457 .llong .sys_futex
1458 .llong .sys_sched_setaffinity
1459 .llong .sys_sched_getaffinity
1460 .llong .sys_ni_syscall
1461 .llong .sys_ni_syscall /* 225 - reserved for tux */
1462 .llong .sys_ni_syscall /* 32bit only sendfile64 */
1463 .llong .sys_io_setup
1464 .llong .sys_io_destroy
1465 .llong .sys_io_getevents
1466 .llong .sys_io_submit /* 230 */
1467 .llong .sys_io_cancel
1468 .llong .sys_set_tid_address
1469 .llong .sys_fadvise64
1470 .llong .sys_exit_group
1471 .llong .sys_lookup_dcookie /* 235 */
1472 .llong .sys_epoll_create
1473 .llong .sys_epoll_ctl
1474 .llong .sys_epoll_wait
1475 .llong .sys_remap_file_pages
1476 .llong .sys_timer_create /* 240 */
1477 .llong .sys_timer_settime
1478 .llong .sys_timer_gettime
1479 .llong .sys_timer_getoverrun
1480 .llong .sys_timer_delete
1481 .llong .sys_clock_settime /* 245 */
1482 .llong .sys_clock_gettime
1483 .llong .sys_clock_getres
1484 .llong .sys_clock_nanosleep
1485 .llong .ppc64_swapcontext
1486 .llong .sys_tgkill /* 250 */
1487 .llong .sys_utimes
1488 .llong .sys_statfs64
1489 .llong .sys_fstatfs64
1490 .llong .sys_ni_syscall /* 32bit only fadvise64_64 */
1491 .llong .ppc_rtas /* 255 */
1492 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1493 .llong .sys_ni_syscall /* 257 reserved for vserver */
1494 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1495 .llong .sys_mbind
1496 .llong .sys_get_mempolicy /* 260 */
1497 .llong .sys_set_mempolicy
1498 .llong .sys_mq_open
1499 .llong .sys_mq_unlink
1500 .llong .sys_mq_timedsend
1501 .llong .sys_mq_timedreceive /* 265 */
1502 .llong .sys_mq_notify
1503 .llong .sys_mq_getsetattr
1504 .llong .sys_kexec_load
1505 .llong .sys_add_key
1506 .llong .sys_request_key /* 270 */
1507 .llong .sys_keyctl
1508 .llong .sys_waitid
1509 .llong .sys_ioprio_set
1510 .llong .sys_ioprio_get
1511 .llong .sys_inotify_init /* 275 */
1512 .llong .sys_inotify_add_watch
1513 .llong .sys_inotify_rm_watch
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
deleted file mode 100644
index 5f5bc73754d9..000000000000
--- a/arch/ppc64/kernel/mpic.c
+++ /dev/null
@@ -1,888 +0,0 @@
1/*
2 * arch/ppc64/kernel/mpic.c
3 *
4 * Driver for interrupt controllers following the OpenPIC standard, the
 5 * common implementation being IBM's MPIC. This driver can also deal
6 * with various broken implementations of this HW.
7 *
8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
9 *
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file COPYING in the main directory of this archive
12 * for more details.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/irq.h>
22#include <linux/smp.h>
23#include <linux/interrupt.h>
24#include <linux/bootmem.h>
25#include <linux/spinlock.h>
26#include <linux/pci.h>
27
28#include <asm/ptrace.h>
29#include <asm/signal.h>
30#include <asm/io.h>
31#include <asm/pgtable.h>
32#include <asm/irq.h>
33#include <asm/machdep.h>
34
35#include "mpic.h"
36
37#ifdef DEBUG
38#define DBG(fmt...) printk(fmt)
39#else
40#define DBG(fmt...)
41#endif
42
43static struct mpic *mpics;
44static struct mpic *mpic_primary;
45static DEFINE_SPINLOCK(mpic_lock);
46
47
48/*
49 * Register accessor functions
50 */
51
52
53static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base,
54 unsigned int reg)
55{
56 if (be)
57 return in_be32(base + (reg >> 2));
58 else
59 return in_le32(base + (reg >> 2));
60}
61
62static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base,
63 unsigned int reg, u32 value)
64{
65 if (be)
66 out_be32(base + (reg >> 2), value);
67 else
68 out_le32(base + (reg >> 2), value);
69}
70
71static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
72{
73 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0;
74 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
75
76 if (mpic->flags & MPIC_BROKEN_IPI)
77 be = !be;
78 return _mpic_read(be, mpic->gregs, offset);
79}
80
81static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
82{
83 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10);
84
85 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value);
86}
87
88static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
89{
90 unsigned int cpu = 0;
91
92 if (mpic->flags & MPIC_PRIMARY)
93 cpu = hard_smp_processor_id();
94
95 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg);
96}
97
98static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
99{
100 unsigned int cpu = 0;
101
102 if (mpic->flags & MPIC_PRIMARY)
103 cpu = hard_smp_processor_id();
104
105 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value);
106}
107
108static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
109{
110 unsigned int isu = src_no >> mpic->isu_shift;
111 unsigned int idx = src_no & mpic->isu_mask;
112
113 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
114 reg + (idx * MPIC_IRQ_STRIDE));
115}
116
117static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
118 unsigned int reg, u32 value)
119{
120 unsigned int isu = src_no >> mpic->isu_shift;
121 unsigned int idx = src_no & mpic->isu_mask;
122
123 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu],
124 reg + (idx * MPIC_IRQ_STRIDE), value);
125}
126
127#define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r))
128#define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v))
129#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
130#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
131#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
132#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
133#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
134#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v))
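
The per-source accessors split a source number into an ISU index (high bits) and an offset within that ISU (low bits), using the shift and mask that mpic_alloc() derives by rounding isu_size up to a power of two. A quick standalone check of that arithmetic (isu_size 24 is a made-up value):

#include <stdio.h>

int main(void)
{
        unsigned int isu_size = 24;     /* hypothetical */
        unsigned int isu_shift = 0, n = isu_size - 1;
        unsigned int isu_mask, src = 40;

        /* same result as 1 + __ilog2(isu_size - 1) in mpic_alloc() */
        while (n) {
                isu_shift++;
                n >>= 1;
        }
        isu_mask = (1u << isu_shift) - 1;

        printf("isu=%u idx=%u (shift=%u, mask=0x%x)\n",
               src >> isu_shift, src & isu_mask, isu_shift, isu_mask);
        return 0;       /* prints isu=1 idx=8 (shift=5, mask=0x1f) */
}
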
135
136
137/*
138 * Low level utility functions
139 */
140
141
142
143/* Check if we have one of those nice broken MPICs with a flipped endian on
144 * reads from IPI registers
145 */
146static void __init mpic_test_broken_ipi(struct mpic *mpic)
147{
148 u32 r;
149
150 mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK);
151 r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0);
152
153 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
154 printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
155 mpic->flags |= MPIC_BROKEN_IPI;
156 }
157}
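
The probe works because a controller with byte-reversed IPI registers hands the written pattern back byte-swapped, while a sane one hands it back unchanged. The same test against a simulated register (swap32() and the broken flag are the simulation, not kernel API):

#include <stdio.h>
#include <stdint.h>

static uint32_t swap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* A broken device byte-swaps on readback; a sane one does not. */
static uint32_t readback(uint32_t written, int broken)
{
        return broken ? swap32(written) : written;
}

int main(void)
{
        const uint32_t MASK = 0x80000000u;      /* plays MPIC_VECPRI_MASK */
        int broken;

        for (broken = 0; broken <= 1; broken++)
                printf("device %d: %s\n", broken,
                       readback(MASK, broken) == swap32(MASK)
                       ? "reversed IPI registers" : "ok");
        return 0;
}
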
158
159#ifdef CONFIG_MPIC_BROKEN_U3
160
161/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
162 * to force the edge setting on the MPIC and do the ack workaround.
163 */
164static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no)
165{
166 if (source_no >= 128 || !mpic->fixups)
167 return 0;
168 return mpic->fixups[source_no].base != NULL;
169}
170
171static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no)
172{
173 struct mpic_irq_fixup *fixup = &mpic->fixups[source_no];
174 u32 tmp;
175
176 spin_lock(&mpic->fixup_lock);
177 writeb(0x11 + 2 * fixup->irq, fixup->base);
178 tmp = readl(fixup->base + 2);
179 writel(tmp | 0x80000000ul, fixup->base + 2);
180 /* config writes shouldn't be posted but let's be safe ... */
181 (void)readl(fixup->base + 2);
182 spin_unlock(&mpic->fixup_lock);
183}
184
185
186static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase)
187{
188 int i, irq;
189 u32 tmp;
190
191 printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase);
192
193 for (i=0; i < 24; i++) {
194 writeb(0x10 + 2*i, devbase + 0xf2);
195 tmp = readl(devbase + 0xf4);
196 if ((tmp & 0x1) || !(tmp & 0x20))
197 continue;
198 irq = (tmp >> 16) & 0xff;
199 mpic->fixups[irq].irq = i;
200 mpic->fixups[irq].base = devbase + 0xf2;
201 }
202}
203
204static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase)
205{
206 int i, irq;
207 u32 tmp;
208
209 printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase);
210
211 for (i=0; i < 4; i++) {
212 writeb(0x10 + 2*i, devbase + 0xba);
213 tmp = readl(devbase + 0xbc);
214 if ((tmp & 0x1) || !(tmp & 0x20))
215 continue;
216 irq = (tmp >> 16) & 0xff;
217 mpic->fixups[irq].irq = i;
218 mpic->fixups[irq].base = devbase + 0xba;
219 }
220}
221
222static void __init mpic_scan_ioapics(struct mpic *mpic)
223{
224 unsigned int devfn;
225 u8 __iomem *cfgspace;
226
227 printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n");
228
229 /* Allocate fixups array */
230 mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
231 BUG_ON(mpic->fixups == NULL);
232 memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));
233
234 /* Init spinlock */
235 spin_lock_init(&mpic->fixup_lock);
236
237 /* Map U3 config space. We assume all IO-APICs are on the primary bus
238 * and that no slot is above 0xf, so we only need to map 32k
239 */
240 cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000);
241 BUG_ON(cfgspace == NULL);
242
243 /* Now we scan all slots. We do a very quick scan: we read only the header
244 * type, vendor ID and device ID, which is plenty
245 */
246 for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) {
247 u8 __iomem *devbase = cfgspace + (devfn << 8);
248 u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
249 u32 l = readl(devbase + PCI_VENDOR_ID);
250 u16 vendor_id, device_id;
251 int multifunc = 0;
252
253 DBG("devfn %x, l: %x\n", devfn, l);
254
255 /* If no device, skip */
256 if (l == 0xffffffff || l == 0x00000000 ||
257 l == 0x0000ffff || l == 0xffff0000)
258 goto next;
259
260 /* Check if it's a multifunction device (this only really
261 * matters for function 0, though)
262 */
263 multifunc = !!(hdr_type & 0x80);
264 vendor_id = l & 0xffff;
265 device_id = (l >> 16) & 0xffff;
266
267 /* If a known device, go to fixup setup code */
268 if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460)
269 mpic_amd8111_read_irq(mpic, devbase);
270 if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450)
271 mpic_amd8131_read_irq(mpic, devbase);
272 next:
273 /* next device, if function 0 */
274 if ((PCI_FUNC(devfn) == 0) && !multifunc)
275 devfn += 7;
276 }
277}
278
279#endif /* CONFIG_MPIC_BROKEN_U3 */
280
281
282/* Find an mpic associated with a given linux interrupt */
283static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
284{
285 struct mpic *mpic = mpics;
286
287 while(mpic) {
288 /* search IPIs first since they may override the main interrupts */
289 if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
290 if (is_ipi)
291 *is_ipi = 1;
292 return mpic;
293 }
294 if (irq >= mpic->irq_offset &&
295 irq < (mpic->irq_offset + mpic->irq_count)) {
296 if (is_ipi)
297 *is_ipi = 0;
298 return mpic;
299 }
300 mpic = mpic->next;
301 }
302 return NULL;
303}
304
305/* Convert a cpu mask from logical to physical cpu numbers. */
306static inline u32 mpic_physmask(u32 cpumask)
307{
308 int i;
309 u32 mask = 0;
310
311 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1)
312 mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
313 return mask;
314}
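
mpic_physmask() re-places each set bit of the logical CPU mask at that CPU's hardware number, which is what the destination registers expect. Worked through with a made-up hard-ID table:

#include <stdio.h>

int main(void)
{
        unsigned int hard_id[4] = { 0, 2, 4, 6 };   /* hypothetical mapping */
        unsigned int cpumask = 0x5;                 /* logical CPUs 0 and 2 */
        unsigned int mask = 0, i;

        for (i = 0; i < 4; i++, cpumask >>= 1)
                mask |= (cpumask & 1) << hard_id[i];
        printf("physmask=0x%x\n", mask);            /* prints physmask=0x11 */
        return 0;
}
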
315
316#ifdef CONFIG_SMP
317/* Get the mpic structure from the IPI number */
318static inline struct mpic * mpic_from_ipi(unsigned int ipi)
319{
320 return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi);
321}
322#endif
323
324/* Get the mpic structure from the irq number */
325static inline struct mpic * mpic_from_irq(unsigned int irq)
326{
327 return container_of(irq_desc[irq].handler, struct mpic, hc_irq);
328}
329
330/* Send an EOI */
331static inline void mpic_eoi(struct mpic *mpic)
332{
333 mpic_cpu_write(MPIC_CPU_EOI, 0);
334 (void)mpic_cpu_read(MPIC_CPU_WHOAMI);
335}
336
337#ifdef CONFIG_SMP
338static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
339{
340 struct mpic *mpic = dev_id;
341
342 smp_message_recv(irq - mpic->ipi_offset, regs);
343 return IRQ_HANDLED;
344}
345#endif /* CONFIG_SMP */
346
347/*
348 * Linux descriptor level callbacks
349 */
350
351
352static void mpic_enable_irq(unsigned int irq)
353{
354 unsigned int loops = 100000;
355 struct mpic *mpic = mpic_from_irq(irq);
356 unsigned int src = irq - mpic->irq_offset;
357
358 DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src);
359
360 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
361 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
362
363 /* make sure mask gets to controller before we return to user */
364 do {
365 if (!loops--) {
366 printk(KERN_ERR "mpic_enable_irq timeout\n");
367 break;
368 }
369 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK);
370}
371
372static void mpic_disable_irq(unsigned int irq)
373{
374 unsigned int loops = 100000;
375 struct mpic *mpic = mpic_from_irq(irq);
376 unsigned int src = irq - mpic->irq_offset;
377
378 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src);
379
380 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
381 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | MPIC_VECPRI_MASK);
382
383 /* make sure mask gets to controller before we return to user */
384 do {
385 if (!loops--) {
386 printk(KERN_ERR "mpic_disable_irq timeout\n");
387 break;
388 }
389 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK));
390}
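
Both handlers use the same write-then-poll idiom: flip the mask bit, then spin (with a bounded loop) until the controller's readback agrees, so the change has really reached the hardware before returning. The skeleton of that idiom against a plain variable (a sketch; a real register read can lag the write, which is the whole point of the loop):

#include <stdio.h>

#define VECPRI_MASK 0x80000000u

static unsigned int reg;        /* stands in for the MPIC vector/priority register */

static int set_masked(int masked)
{
        unsigned int loops = 100000;

        reg = masked ? (reg | VECPRI_MASK) : (reg & ~VECPRI_MASK);
        while (((reg & VECPRI_MASK) != 0) != masked) {
                if (!loops--)
                        return -1;      /* timeout, as the printk above reports */
        }
        return 0;
}

int main(void)
{
        printf("%d %d\n", set_masked(1), set_masked(0)); /* prints 0 0 */
        return 0;
}
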
391
392static void mpic_end_irq(unsigned int irq)
393{
394 struct mpic *mpic = mpic_from_irq(irq);
395
396 DBG("%s: end_irq: %d\n", mpic->name, irq);
397
398 /* We always EOI on end_irq() even for edge interrupts since that
399 * should only lower the priority, the MPIC should have properly
400 * latched another edge interrupt coming in anyway
401 */
402
403#ifdef CONFIG_MPIC_BROKEN_U3
404 if (mpic->flags & MPIC_BROKEN_U3) {
405 unsigned int src = irq - mpic->irq_offset;
406 if (mpic_is_ht_interrupt(mpic, src))
407 mpic_apic_end_irq(mpic, src);
408 }
409#endif /* CONFIG_MPIC_BROKEN_U3 */
410
411 mpic_eoi(mpic);
412}
413
414#ifdef CONFIG_SMP
415
416static void mpic_enable_ipi(unsigned int irq)
417{
418 struct mpic *mpic = mpic_from_ipi(irq);
419 unsigned int src = irq - mpic->ipi_offset;
420
421 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src);
422 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
423}
424
425static void mpic_disable_ipi(unsigned int irq)
426{
427 /* NEVER disable an IPI... that's just plain wrong! */
428}
429
430static void mpic_end_ipi(unsigned int irq)
431{
432 struct mpic *mpic = mpic_from_ipi(irq);
433
434 /*
435 * IPIs are marked IRQ_PER_CPU. This has the side effect of
436 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
437 * applying to them. We EOI them late to avoid re-entering.
438 * We mark IPIs with SA_INTERRUPT as they must run with
439 * irqs disabled.
440 */
441 mpic_eoi(mpic);
442}
443
444#endif /* CONFIG_SMP */
445
446static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
447{
448 struct mpic *mpic = mpic_from_irq(irq);
449
450 cpumask_t tmp;
451
452 cpus_and(tmp, cpumask, cpu_online_map);
453
454 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
455 mpic_physmask(cpus_addr(tmp)[0]));
456}
457
458
459/*
460 * Exported functions
461 */
462
463
464struct mpic * __init mpic_alloc(unsigned long phys_addr,
465 unsigned int flags,
466 unsigned int isu_size,
467 unsigned int irq_offset,
468 unsigned int irq_count,
469 unsigned int ipi_offset,
470 unsigned char *senses,
471 unsigned int senses_count,
472 const char *name)
473{
474 struct mpic *mpic;
475 u32 reg;
476 const char *vers;
477 int i;
478
479 mpic = alloc_bootmem(sizeof(struct mpic));
480 if (mpic == NULL)
481 return NULL;
482
483 memset(mpic, 0, sizeof(struct mpic));
484 mpic->name = name;
485
486 mpic->hc_irq.typename = name;
487 mpic->hc_irq.enable = mpic_enable_irq;
488 mpic->hc_irq.disable = mpic_disable_irq;
489 mpic->hc_irq.end = mpic_end_irq;
490 if (flags & MPIC_PRIMARY)
491 mpic->hc_irq.set_affinity = mpic_set_affinity;
492#ifdef CONFIG_SMP
493 mpic->hc_ipi.typename = name;
494 mpic->hc_ipi.enable = mpic_enable_ipi;
495 mpic->hc_ipi.disable = mpic_disable_ipi;
496 mpic->hc_ipi.end = mpic_end_ipi;
497#endif /* CONFIG_SMP */
498
499 mpic->flags = flags;
500 mpic->isu_size = isu_size;
501 mpic->irq_offset = irq_offset;
502 mpic->irq_count = irq_count;
503 mpic->ipi_offset = ipi_offset;
504 mpic->num_sources = 0; /* so far */
505 mpic->senses = senses;
506 mpic->senses_count = senses_count;
507
508 /* Map the global registers */
509 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x2000);
510 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
511 BUG_ON(mpic->gregs == NULL);
512
513 /* Reset */
514 if (flags & MPIC_WANTS_RESET) {
515 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
516 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
517 | MPIC_GREG_GCONF_RESET);
518 while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
519 & MPIC_GREG_GCONF_RESET)
520 mb();
521 }
522
523 /* Read feature register, calculate num CPUs and, for non-ISU
524 * MPICs, num sources as well. On ISU MPICs, sources are counted
525 * as ISUs are added
526 */
527 reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
528 mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
529 >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
530 if (isu_size == 0)
531 mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
532 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
533
534 /* Map the per-CPU registers */
535 for (i = 0; i < mpic->num_cpus; i++) {
536 mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
537 i * MPIC_CPU_STRIDE, 0x1000);
538 BUG_ON(mpic->cpuregs[i] == NULL);
539 }
540
541 /* Initialize main ISU if none provided */
542 if (mpic->isu_size == 0) {
543 mpic->isu_size = mpic->num_sources;
544 mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
545 MPIC_IRQ_STRIDE * mpic->isu_size);
546 BUG_ON(mpic->isus[0] == NULL);
547 }
548 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
549 mpic->isu_mask = (1 << mpic->isu_shift) - 1;
550
551 /* Display version */
552 switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
553 case 1:
554 vers = "1.0";
555 break;
556 case 2:
557 vers = "1.2";
558 break;
559 case 3:
560 vers = "1.3";
561 break;
562 default:
563 vers = "<unknown>";
564 break;
565 }
566 printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
567 name, vers, phys_addr, mpic->num_cpus);
568 printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
569 mpic->isu_shift, mpic->isu_mask);
570
571 mpic->next = mpics;
572 mpics = mpic;
573
574 if (flags & MPIC_PRIMARY)
575 mpic_primary = mpic;
576
577 return mpic;
578}
579
580void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
581 unsigned long phys_addr)
582{
583 unsigned int isu_first = isu_num * mpic->isu_size;
584
585 BUG_ON(isu_num >= MPIC_MAX_ISU);
586
587 mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
588 if ((isu_first + mpic->isu_size) > mpic->num_sources)
589 mpic->num_sources = isu_first + mpic->isu_size;
590}
591
592void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
593 void *data)
594{
595 struct mpic *mpic = mpic_find(irq, NULL);
596 unsigned long flags;
597
598 /* Synchronization here is a bit dodgy, so don't try to replace cascade
599 * interrupts on the fly too often ... but normally it's set up at boot.
600 */
601 spin_lock_irqsave(&mpic_lock, flags);
602 if (mpic->cascade)
603 mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
604 mpic->cascade = NULL;
605 wmb();
606 mpic->cascade_vec = irq - mpic->irq_offset;
607 mpic->cascade_data = data;
608 wmb();
609 mpic->cascade = handler;
610 mpic_enable_irq(irq);
611 spin_unlock_irqrestore(&mpic_lock, flags);
612}
613
614void __init mpic_init(struct mpic *mpic)
615{
616 int i;
617
618 BUG_ON(mpic->num_sources == 0);
619
620 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
621
622 /* Set current processor priority to max */
623 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
624
625 /* Initialize timers: just disable them all */
626 for (i = 0; i < 4; i++) {
627 mpic_write(mpic->tmregs,
628 i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
629 mpic_write(mpic->tmregs,
630 i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
631 MPIC_VECPRI_MASK |
632 (MPIC_VEC_TIMER_0 + i));
633 }
634
635 /* Initialize IPIs to our reserved vectors and mark them disabled for now */
636 mpic_test_broken_ipi(mpic);
637 for (i = 0; i < 4; i++) {
638 mpic_ipi_write(i,
639 MPIC_VECPRI_MASK |
640 (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
641 (MPIC_VEC_IPI_0 + i));
642#ifdef CONFIG_SMP
643 if (!(mpic->flags & MPIC_PRIMARY))
644 continue;
645 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
646 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
647
648#endif /* CONFIG_SMP */
649 }
650
651 /* Initialize interrupt sources */
652 if (mpic->irq_count == 0)
653 mpic->irq_count = mpic->num_sources;
654
655#ifdef CONFIG_MPIC_BROKEN_U3
656 /* Do the ioapic fixups on U3 broken mpic */
657 DBG("MPIC flags: %x\n", mpic->flags);
658 if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
659 mpic_scan_ioapics(mpic);
660#endif /* CONFIG_MPIC_BROKEN_U3 */
661
662 for (i = 0; i < mpic->num_sources; i++) {
663 /* start with vector = source number, and masked */
664 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
665 int level = 0;
666
667 /* if it's an IPI, we skip it */
668 if ((mpic->irq_offset + i) >= mpic->ipi_offset &&
669 (mpic->irq_offset + i) < (mpic->ipi_offset + 4))
670 continue;
671
672 /* do senses munging */
673 if (mpic->senses && i < mpic->senses_count) {
674 if (mpic->senses[i] & IRQ_SENSE_LEVEL)
675 vecpri |= MPIC_VECPRI_SENSE_LEVEL;
676 if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
677 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
678 } else
679 vecpri |= MPIC_VECPRI_SENSE_LEVEL;
680
681 /* remember if it was a level interrupt */
682 level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);
683
684 /* deal with broken U3 */
685 if (mpic->flags & MPIC_BROKEN_U3) {
686#ifdef CONFIG_MPIC_BROKEN_U3
687 if (mpic_is_ht_interrupt(mpic, i)) {
688 vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
689 MPIC_VECPRI_POLARITY_MASK);
690 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
691 }
692#else
693 printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
694#endif
695 }
696
697 DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
698 (level != 0));
699
700 /* init hw */
701 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
702 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
703 1 << get_hard_smp_processor_id(boot_cpuid));
704
705 /* init linux descriptors */
706 if (i < mpic->irq_count) {
707 irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
708 irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
709 }
710 }
711
712 /* Init spurious vector */
713 mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);
714
715 /* Disable 8259 passthrough */
716 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
717 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
718 | MPIC_GREG_GCONF_8259_PTHROU_DIS);
719
720 /* Set current processor priority to 0 */
721 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
722}
723
724
725
726void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
727{
728 int is_ipi;
729 struct mpic *mpic = mpic_find(irq, &is_ipi);
730 unsigned long flags;
731 u32 reg;
732
733 spin_lock_irqsave(&mpic_lock, flags);
734 if (is_ipi) {
735 reg = mpic_ipi_read(irq - mpic->ipi_offset) & MPIC_VECPRI_PRIORITY_MASK;
736 mpic_ipi_write(irq - mpic->ipi_offset,
737 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
738 } else {
739 reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
740 & MPIC_VECPRI_PRIORITY_MASK;
741 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
742 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
743 }
744 spin_unlock_irqrestore(&mpic_lock, flags);
745}
746
747unsigned int mpic_irq_get_priority(unsigned int irq)
748{
749 int is_ipi;
750 struct mpic *mpic = mpic_find(irq, &is_ipi);
751 unsigned long flags;
752 u32 reg;
753
754 spin_lock_irqsave(&mpic_lock, flags);
755 if (is_ipi)
756 reg = mpic_ipi_read(irq - mpic->ipi_offset);
757 else
758 reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
759 spin_unlock_irqrestore(&mpic_lock, flags);
760 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
761}
762
763void mpic_setup_this_cpu(void)
764{
765#ifdef CONFIG_SMP
766 struct mpic *mpic = mpic_primary;
767 unsigned long flags;
768 u32 msk = 1 << hard_smp_processor_id();
769 unsigned int i;
770
771 BUG_ON(mpic == NULL);
772
773 DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
774
775 spin_lock_irqsave(&mpic_lock, flags);
776
777 /* let the mpic know we want intrs. default affinity is 0xffffffff
778 * until changed via /proc. That's how it's done on x86. If we want
779 * it differently, then we should make sure we also change the default
780 * values of irq_affinity in irq.c.
781 */
782 if (distribute_irqs) {
783 for (i = 0; i < mpic->num_sources ; i++)
784 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
785 mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
786 }
787
788 /* Set current processor priority to 0 */
789 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
790
791 spin_unlock_irqrestore(&mpic_lock, flags);
792#endif /* CONFIG_SMP */
793}
794
795/*
796 * XXX: someone who knows mpic should check this.
797 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
798 * or can we reset the mpic in the new kernel?
799 */
800void mpic_teardown_this_cpu(int secondary)
801{
802 struct mpic *mpic = mpic_primary;
803 unsigned long flags;
804 u32 msk = 1 << hard_smp_processor_id();
805 unsigned int i;
806
807 BUG_ON(mpic == NULL);
808
809 DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
810 spin_lock_irqsave(&mpic_lock, flags);
811
812 /* let the mpic know we don't want intrs. */
813 for (i = 0; i < mpic->num_sources ; i++)
814 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
815 mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);
816
817 /* Set current processor priority to max */
818 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);
819
820 spin_unlock_irqrestore(&mpic_lock, flags);
821}
822
823
824void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
825{
826 struct mpic *mpic = mpic_primary;
827
828 BUG_ON(mpic == NULL);
829
830 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
831
832 mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
833 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
834}
835
836int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
837{
838 u32 irq;
839
840 irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
841 DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
842
843 if (mpic->cascade && irq == mpic->cascade_vec) {
844 DBG("%s: cascading ...\n", mpic->name);
845 irq = mpic->cascade(regs, mpic->cascade_data);
846 mpic_eoi(mpic);
847 return irq;
848 }
849 if (unlikely(irq == MPIC_VEC_SPURRIOUS))
850 return -1;
851 if (irq < MPIC_VEC_IPI_0)
852 return irq + mpic->irq_offset;
853 DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
854 return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
855}
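
/* Vector-space summary (from the MPIC_VEC_* defines in mpic.h):
 * hardware sources use vectors below MPIC_VEC_IPI_0 (251), the four
 * IPIs sit at vectors 251..254, and 255 is the spurious vector --
 * hence the decode order above: cascade, spurious, source, then IPI.
 */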
856
857int mpic_get_irq(struct pt_regs *regs)
858{
859 struct mpic *mpic = mpic_primary;
860
861 BUG_ON(mpic == NULL);
862
863 return mpic_get_one_irq(mpic, regs);
864}
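
/* Usage sketch (illustrative; the assignment lives in platform setup
 * code such as pmac_setup.c, not in this file): board code points the
 * machine-dependent interrupt fetch hook at this driver, e.g.
 *
 *	ppc_md.get_irq = mpic_get_irq;
 */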
865
866
867#ifdef CONFIG_SMP
868void mpic_request_ipis(void)
869{
870 struct mpic *mpic = mpic_primary;
871
872 BUG_ON(mpic == NULL);
873
874 printk("requesting IPIs ... \n");
875
876 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
877 request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
878 "IPI0 (call function)", mpic);
879 request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
880 "IPI1 (reschedule)", mpic);
881 request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
882 "IPI2 (unused)", mpic);
883 request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
884 "IPI3 (debugger break)", mpic);
885
886 printk("IPIs requested... \n");
887}
888#endif /* CONFIG_SMP */
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
deleted file mode 100644
index ca78a7f10528..000000000000
--- a/arch/ppc64/kernel/mpic.h
+++ /dev/null
@@ -1,273 +0,0 @@
1#include <linux/irq.h>
2
3/*
4 * Global registers
5 */
6
7#define MPIC_GREG_BASE 0x01000
8
9#define MPIC_GREG_FEATURE_0 0x00000
10#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
11#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
12#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
13#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
14#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
15#define MPIC_GREG_FEATURE_1 0x00010
16#define MPIC_GREG_GLOBAL_CONF_0 0x00020
17#define MPIC_GREG_GCONF_RESET 0x80000000
18#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
19#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
20#define MPIC_GREG_GLOBAL_CONF_1 0x00030
21#define MPIC_GREG_VENDOR_0 0x00040
22#define MPIC_GREG_VENDOR_1 0x00050
23#define MPIC_GREG_VENDOR_2 0x00060
24#define MPIC_GREG_VENDOR_3 0x00070
25#define MPIC_GREG_VENDOR_ID 0x00080
26#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
27#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
28#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
29#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
30#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
31#define MPIC_GREG_PROCESSOR_INIT 0x00090
32#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
33#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
34#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
35#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
36#define MPIC_GREG_SPURIOUS 0x000e0
37#define MPIC_GREG_TIMER_FREQ 0x000f0
38
39/*
40 *
41 * Timer registers
42 */
43#define MPIC_TIMER_BASE 0x01100
44#define MPIC_TIMER_STRIDE 0x40
45
46#define MPIC_TIMER_CURRENT_CNT 0x00000
47#define MPIC_TIMER_BASE_CNT 0x00010
48#define MPIC_TIMER_VECTOR_PRI 0x00020
49#define MPIC_TIMER_DESTINATION 0x00030
50
51/*
52 * Per-Processor registers
53 */
54
55#define MPIC_CPU_THISBASE 0x00000
56#define MPIC_CPU_BASE 0x20000
57#define MPIC_CPU_STRIDE 0x01000
58
59#define MPIC_CPU_IPI_DISPATCH_0 0x00040
60#define MPIC_CPU_IPI_DISPATCH_1 0x00050
61#define MPIC_CPU_IPI_DISPATCH_2 0x00060
62#define MPIC_CPU_IPI_DISPATCH_3 0x00070
63#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
64#define MPIC_CPU_TASKPRI_MASK 0x0000000f
65#define MPIC_CPU_WHOAMI 0x00090
66#define MPIC_CPU_WHOAMI_MASK 0x0000001f
67#define MPIC_CPU_INTACK 0x000a0
68#define MPIC_CPU_EOI 0x000b0
69
70/*
71 * Per-source registers
72 */
73
74#define MPIC_IRQ_BASE 0x10000
75#define MPIC_IRQ_STRIDE 0x00020
76#define MPIC_IRQ_VECTOR_PRI 0x00000
77#define MPIC_VECPRI_MASK 0x80000000
78#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
79#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
80#define MPIC_VECPRI_PRIORITY_SHIFT 16
81#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
82#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
83#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
84#define MPIC_VECPRI_POLARITY_MASK 0x00800000
85#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
86#define MPIC_VECPRI_SENSE_EDGE 0x00000000
87#define MPIC_VECPRI_SENSE_MASK 0x00400000
88#define MPIC_IRQ_DESTINATION 0x00010
89
90#define MPIC_MAX_IRQ_SOURCES 2048
91#define MPIC_MAX_CPUS 32
92#define MPIC_MAX_ISU 32
93
94/*
95 * Special vector numbers (internal use only)
96 */
97#define MPIC_VEC_SPURRIOUS 255
98#define MPIC_VEC_IPI_3 254
99#define MPIC_VEC_IPI_2 253
100#define MPIC_VEC_IPI_1 252
101#define MPIC_VEC_IPI_0 251
102
103/* unused */
104#define MPIC_VEC_TIMER_3 250
105#define MPIC_VEC_TIMER_2 249
106#define MPIC_VEC_TIMER_1 248
107#define MPIC_VEC_TIMER_0 247
108
109/* Type definition of the cascade handler */
110typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
111
112#ifdef CONFIG_MPIC_BROKEN_U3
113/* Fixup table entry */
114struct mpic_irq_fixup
115{
116 u8 __iomem *base;
117 unsigned int irq;
118};
119#endif /* CONFIG_MPIC_BROKEN_U3 */
120
121
122/* The instance data of a given MPIC */
123struct mpic
124{
125 /* The "linux" controller struct */
126 hw_irq_controller hc_irq;
127#ifdef CONFIG_SMP
128 hw_irq_controller hc_ipi;
129#endif
130 const char *name;
131 /* Flags */
132 unsigned int flags;
133 /* How many irq sources in a given ISU */
134 unsigned int isu_size;
135 unsigned int isu_shift;
136 unsigned int isu_mask;
137 /* Offset of irq vector numbers */
138 unsigned int irq_offset;
139 unsigned int irq_count;
140 /* Offset of ipi vector numbers */
141 unsigned int ipi_offset;
142 /* Number of sources */
143 unsigned int num_sources;
144 /* Number of CPUs */
145 unsigned int num_cpus;
146 /* cascade handler */
147 mpic_cascade_t cascade;
148 void *cascade_data;
149 unsigned int cascade_vec;
150 /* senses array */
151 unsigned char *senses;
152 unsigned int senses_count;
153
154#ifdef CONFIG_MPIC_BROKEN_U3
155 /* The fixup table */
156 struct mpic_irq_fixup *fixups;
157 spinlock_t fixup_lock;
158#endif
159
160 /* The various ioremap'ed bases */
161 volatile u32 __iomem *gregs;
162 volatile u32 __iomem *tmregs;
163 volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
164 volatile u32 __iomem *isus[MPIC_MAX_ISU];
165
166 /* link */
167 struct mpic *next;
168};
169
170/* This is the primary controller; only this one has IPIs and
171 * affinity control. A non-primary MPIC always uses CPU0
172 * registers only.
173 */
174#define MPIC_PRIMARY 0x00000001
175/* Set this for a big-endian MPIC */
176#define MPIC_BIG_ENDIAN 0x00000002
177/* Broken U3 MPIC */
178#define MPIC_BROKEN_U3 0x00000004
179/* Broken IPI registers (autodetected) */
180#define MPIC_BROKEN_IPI 0x00000008
181/* MPIC wants a reset */
182#define MPIC_WANTS_RESET 0x00000010
183
184/* Allocate the controller structure and setup the linux irq descs
185 * for the range of interrupts passed in. No HW initialization is
186 * actually performed.
187 *
188 * @phys_addr: physical base address of the MPIC
189 * @flags: flags, see constants above
190 * @isu_size: number of interrupts in an ISU. Use 0 to use a
191 * standard ISU-less setup (aka powermac)
192 * @irq_offset: first irq number to assign to this mpic
193 * @irq_count: number of irqs to use for this mpic's IRQ sources. Pass 0
194 * to match the number of sources
195 * @ipi_offset: first irq number to assign to this mpic's IPI sources,
196 * used only on primary mpic
197 * @senses: array of sense values
198 * @senses_num: number of entries in the array
199 *
200 * Note about the sense array. If none is passed, all interrupts are
201 * setup to be level negative unless MPIC_BROKEN_U3 is set in which
202 * case they are edge positive (and the array is ignored anyway).
203 * The values in the array start at the first source of the MPIC,
204 * that is, senses[0] corresponds to linux irq "irq_offset".
205 */
206extern struct mpic *mpic_alloc(unsigned long phys_addr,
207 unsigned int flags,
208 unsigned int isu_size,
209 unsigned int irq_offset,
210 unsigned int irq_count,
211 unsigned int ipi_offset,
212 unsigned char *senses,
213 unsigned int senses_num,
214 const char *name);
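
#if 0	/* Illustrative usage sketch only: the address, counts and
	 * sense values below are hypothetical, not from any real
	 * platform. It shows an ISU-less primary MPIC brought up with
	 * an explicit senses array, per the note above. */
static unsigned char example_senses[4] = {
	IRQ_SENSE_LEVEL,				/* level, negative */
	IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,	/* level, positive */
	0,						/* edge, negative */
	IRQ_SENSE_LEVEL,
};

static void __init example_pic_init(void)
{
	struct mpic *mpic;

	mpic = mpic_alloc(0xf8040000 /* phys_addr */, MPIC_PRIMARY,
			  0 /* isu_size: ISU-less */, 0 /* irq_offset */,
			  0 /* irq_count: match sources */,
			  128 /* ipi_offset */,
			  example_senses, 4, "EXAMPLE-MPIC");
	if (mpic != NULL)
		mpic_init(mpic);
}
#endif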
215
216/* Assign ISUs, to call before mpic_init()
217 *
218 * @mpic: controller structure as returned by mpic_alloc()
219 * @isu_num: ISU number
220 * @phys_addr: physical address of the ISU
221 */
222extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
223 unsigned long phys_addr);
224
225/* Initialize the controller. After this has been called, none of the above
226 * should be called again for this mpic
227 */
228extern void mpic_init(struct mpic *mpic);
229
230/* Setup a cascade. Currently, only one cascade is supported this
231 * way, though you can always do a normal request_irq() and add
232 * other cascades that way. You should call this _after_ having
233 * added all the ISUs.
234 *
235 * @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
236 * @handler: cascade handler function
237 */
238extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t handler,
239 void *data);
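
#if 0	/* Illustrative sketch of a cascade handler matching
	 * mpic_cascade_t. The i8259 wiring is an assumption modelled
	 * on pSeries-style setups, not code from this file. */
static int example_8259_cascade(struct pt_regs *regs, void *data)
{
	/* ask the cascaded 8259 which of its interrupts fired */
	return i8259_irq(smp_processor_id());
}

static void example_cascade_setup(unsigned int cascade_irq)
{
	/* call after mpic_init() and any mpic_assign_isu() calls */
	mpic_setup_cascade(cascade_irq, example_8259_cascade, NULL);
}
#endif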
240
241/*
242 * All of the following functions must only be used after the
243 * ISUs have been assigned and the controller fully initialized
244 * with mpic_init()
245 */
246
247
248/* Change/Read the priority of an interrupt. Default is 8 for irqs and
249 * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the
250 * IPI number is then the offset'ed one (the linux irq number mapped to that IPI).
251 */
252extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
253extern unsigned int mpic_irq_get_priority(unsigned int irq);
254
255/* Setup a non-boot CPU */
256extern void mpic_setup_this_cpu(void);
257
258/* Clean up for kexec (or cpu offline or ...) */
259extern void mpic_teardown_this_cpu(int secondary);
260
261/* Request IPIs on primary mpic */
262extern void mpic_request_ipis(void);
263
264/* Send an IPI (non-offset'ed number, 0..3) */
265extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
266
267/* Fetch interrupt from a given mpic */
268extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
269/* This one gets to the primary mpic */
270extern int mpic_get_irq(struct pt_regs *regs);
271
272/* global mpic for pSeries */
273extern struct mpic *pSeries_mpic;
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
deleted file mode 100644
index 9f200f0f2ad5..000000000000
--- a/arch/ppc64/kernel/of_device.c
+++ /dev/null
@@ -1,274 +0,0 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <asm/errno.h>
8#include <asm/of_device.h>
9
10/**
11 * of_match_device - Tell if an of_device structure has a matching
12 * of_device_id structure
13 * @matches: array of of_device_id structures to search in
14 * @dev: the of device structure to match against
15 *
16 * Used by a driver to check whether an of_device present in the
17 * system is in its list of supported devices.
18 */
19const struct of_device_id *of_match_device(const struct of_device_id *matches,
20 const struct of_device *dev)
21{
22 if (!dev->node)
23 return NULL;
24 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
25 int match = 1;
26 if (matches->name[0])
27 match &= dev->node->name
28 && !strcmp(matches->name, dev->node->name);
29 if (matches->type[0])
30 match &= dev->node->type
31 && !strcmp(matches->type, dev->node->type);
32 if (matches->compatible[0])
33 match &= device_is_compatible(dev->node,
34 matches->compatible);
35 if (match)
36 return matches;
37 matches++;
38 }
39 return NULL;
40}
41
42static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
43{
44 struct of_device * of_dev = to_of_device(dev);
45 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
46 const struct of_device_id * matches = of_drv->match_table;
47
48 if (!matches)
49 return 0;
50
51 return of_match_device(matches, of_dev) != NULL;
52}
53
54struct of_device *of_dev_get(struct of_device *dev)
55{
56 struct device *tmp;
57
58 if (!dev)
59 return NULL;
60 tmp = get_device(&dev->dev);
61 if (tmp)
62 return to_of_device(tmp);
63 else
64 return NULL;
65}
66
67void of_dev_put(struct of_device *dev)
68{
69 if (dev)
70 put_device(&dev->dev);
71}
72
73
74static int of_device_probe(struct device *dev)
75{
76 int error = -ENODEV;
77 struct of_platform_driver *drv;
78 struct of_device *of_dev;
79 const struct of_device_id *match;
80
81 drv = to_of_platform_driver(dev->driver);
82 of_dev = to_of_device(dev);
83
84 if (!drv->probe)
85 return error;
86
87 of_dev_get(of_dev);
88
89 match = of_match_device(drv->match_table, of_dev);
90 if (match)
91 error = drv->probe(of_dev, match);
92 if (error)
93 of_dev_put(of_dev);
94
95 return error;
96}
97
98static int of_device_remove(struct device *dev)
99{
100 struct of_device * of_dev = to_of_device(dev);
101 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
102
103 if (dev->driver && drv->remove)
104 drv->remove(of_dev);
105 return 0;
106}
107
108static int of_device_suspend(struct device *dev, pm_message_t state)
109{
110 struct of_device * of_dev = to_of_device(dev);
111 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
112 int error = 0;
113
114 if (dev->driver && drv->suspend)
115 error = drv->suspend(of_dev, state);
116 return error;
117}
118
119static int of_device_resume(struct device * dev)
120{
121 struct of_device * of_dev = to_of_device(dev);
122 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
123 int error = 0;
124
125 if (dev->driver && drv->resume)
126 error = drv->resume(of_dev);
127 return error;
128}
129
130struct bus_type of_platform_bus_type = {
131 .name = "of_platform",
132 .match = of_platform_bus_match,
133 .suspend = of_device_suspend,
134 .resume = of_device_resume,
135};
136
137static int __init of_bus_driver_init(void)
138{
139 return bus_register(&of_platform_bus_type);
140}
141
142postcore_initcall(of_bus_driver_init);
143
144int of_register_driver(struct of_platform_driver *drv)
145{
146 int count = 0;
147
148 /* initialize common driver fields */
149 drv->driver.name = drv->name;
150 drv->driver.bus = &of_platform_bus_type;
151 drv->driver.probe = of_device_probe;
152 drv->driver.remove = of_device_remove;
153
154 /* register with core */
155 count = driver_register(&drv->driver);
156 return count ? count : 1;
157}
158
159void of_unregister_driver(struct of_platform_driver *drv)
160{
161 driver_unregister(&drv->driver);
162}
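
#if 0	/* Illustrative sketch: a minimal client of the registration
	 * API above. The "example,widget" binding, names and probe
	 * body are hypothetical. */
static struct of_device_id widget_match[] = {
	{ .compatible	= "example,widget" },
	{},
};

static int widget_probe(struct of_device *ofdev,
			const struct of_device_id *match)
{
	/* map registers from ofdev->node, request irqs, etc. */
	return 0;
}

static int widget_remove(struct of_device *ofdev)
{
	return 0;
}

static struct of_platform_driver widget_driver = {
	.name		= "widget",
	.match_table	= widget_match,
	.probe		= widget_probe,
	.remove		= widget_remove,
};

static int __init widget_init(void)
{
	return of_register_driver(&widget_driver);
}
#endif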
163
164
165static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
166{
167 struct of_device *ofdev;
168
169 ofdev = to_of_device(dev);
170 return sprintf(buf, "%s", ofdev->node->full_name);
171}
172
173static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
174
175/**
176 * of_release_dev - free an of device structure when all users of it are finished.
177 * @dev: device that's been disconnected
178 *
179 * Will be called only by the device core when all users of this of device are
180 * done.
181 */
182void of_release_dev(struct device *dev)
183{
184 struct of_device *ofdev;
185
186 ofdev = to_of_device(dev);
187 kfree(ofdev);
188}
189
190int of_device_register(struct of_device *ofdev)
191{
192 int rc;
193 struct of_device **odprop;
194
195 BUG_ON(ofdev->node == NULL);
196
197 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
198 if (!odprop) {
199 struct property *new_prop;
200
201 new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
202 GFP_KERNEL);
203 if (new_prop == NULL)
204 return -ENOMEM;
205 new_prop->name = "linux,device";
206		new_prop->length = sizeof(struct of_device *);
207 new_prop->value = (unsigned char *)&new_prop[1];
208 odprop = (struct of_device **)new_prop->value;
209 *odprop = NULL;
210 prom_add_property(ofdev->node, new_prop);
211 }
212 *odprop = ofdev;
213
214 rc = device_register(&ofdev->dev);
215 if (rc)
216 return rc;
217
218 device_create_file(&ofdev->dev, &dev_attr_devspec);
219
220 return 0;
221}
222
223void of_device_unregister(struct of_device *ofdev)
224{
225 struct of_device **odprop;
226
227 device_remove_file(&ofdev->dev, &dev_attr_devspec);
228
229 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
230 if (odprop)
231 *odprop = NULL;
232
233 device_unregister(&ofdev->dev);
234}
235
236struct of_device* of_platform_device_create(struct device_node *np,
237 const char *bus_id,
238 struct device *parent)
239{
240 struct of_device *dev;
241
242 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
243 if (!dev)
244 return NULL;
245 memset(dev, 0, sizeof(*dev));
246
247 dev->node = np;
248 dev->dma_mask = 0xffffffffUL;
249 dev->dev.dma_mask = &dev->dma_mask;
250 dev->dev.parent = parent;
251 dev->dev.bus = &of_platform_bus_type;
252 dev->dev.release = of_release_dev;
253
254 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
255
256 if (of_device_register(dev) != 0) {
257 kfree(dev);
258 return NULL;
259 }
260
261 return dev;
262}
263
264
265EXPORT_SYMBOL(of_match_device);
266EXPORT_SYMBOL(of_platform_bus_type);
267EXPORT_SYMBOL(of_register_driver);
268EXPORT_SYMBOL(of_unregister_driver);
269EXPORT_SYMBOL(of_device_register);
270EXPORT_SYMBOL(of_device_unregister);
271EXPORT_SYMBOL(of_dev_get);
272EXPORT_SYMBOL(of_dev_put);
273EXPORT_SYMBOL(of_platform_device_create);
274EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/ppc64/kernel/pSeries_hvCall.S
deleted file mode 100644
index 176e8da76466..000000000000
--- a/arch/ppc64/kernel/pSeries_hvCall.S
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * arch/ppc64/kernel/pSeries_hvCall.S
3 *
4 * This file contains the generic code to perform a call to the
5 * pSeries LPAR hypervisor.
6 * NOTE: this file will go away when we move to inline this work.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/hvcall.h>
14#include <asm/processor.h>
15#include <asm/ppc_asm.h>
16
17#define STK_PARM(i) (48 + ((i)-3)*8)
18
19 .text
20
21/* long plpar_hcall(unsigned long opcode, R3
22 unsigned long arg1, R4
23 unsigned long arg2, R5
24 unsigned long arg3, R6
25 unsigned long arg4, R7
26 unsigned long *out1, R8
27 unsigned long *out2, R9
28 unsigned long *out3); R10
29 */
30_GLOBAL(plpar_hcall)
31 HMT_MEDIUM
32
33 mfcr r0
34
35 std r8,STK_PARM(r8)(r1) /* Save out ptrs */
36 std r9,STK_PARM(r9)(r1)
37 std r10,STK_PARM(r10)(r1)
38
39 stw r0,8(r1)
40
41 HVSC /* invoke the hypervisor */
42
43 lwz r0,8(r1)
44
45 ld r8,STK_PARM(r8)(r1) /* Fetch r4-r6 ret args */
46 ld r9,STK_PARM(r9)(r1)
47 ld r10,STK_PARM(r10)(r1)
48 std r4,0(r8)
49 std r5,0(r9)
50 std r6,0(r10)
51
52 mtcrf 0xff,r0
53 blr /* return r3 = status */
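
/* Illustrative C-side view of the routine above (the real caller is
 * pSeries_lpar_hpte_insert() in pSeries_lpar.c):
 *
 *	unsigned long slot, dummy0, dummy1;
 *	long rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
 *			      hpte_r, &slot, &dummy0, &dummy1);
 *
 * The hcall status comes back in r3 as the C return value, and the
 * three out pointers receive r4-r6, matching the stores above.
 */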
54
55
56/* Simple interface with no output values (other than status) */
57_GLOBAL(plpar_hcall_norets)
58 HMT_MEDIUM
59
60 mfcr r0
61 stw r0,8(r1)
62
63 HVSC /* invoke the hypervisor */
64
65 lwz r0,8(r1)
66 mtcrf 0xff,r0
67 blr /* return r3 = status */
68
69
70/* long plpar_hcall_8arg_2ret(unsigned long opcode, R3
71 unsigned long arg1, R4
72 unsigned long arg2, R5
73 unsigned long arg3, R6
74 unsigned long arg4, R7
75 unsigned long arg5, R8
76 unsigned long arg6, R9
77 unsigned long arg7, R10
78 unsigned long arg8, 112(R1)
79 unsigned long *out1); 120(R1)
80 */
81_GLOBAL(plpar_hcall_8arg_2ret)
82 HMT_MEDIUM
83
84 mfcr r0
85 ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */
86 stw r0,8(r1)
87
88 HVSC /* invoke the hypervisor */
89
90 lwz r0,8(r1)
91 ld r10,STK_PARM(r12)(r1) /* Fetch r4 ret arg */
92 std r4,0(r10)
93 mtcrf 0xff,r0
94 blr /* return r3 = status */
95
96
97/* long plpar_hcall_4out(unsigned long opcode, R3
98 unsigned long arg1, R4
99 unsigned long arg2, R5
100 unsigned long arg3, R6
101 unsigned long arg4, R7
102 unsigned long *out1, R8
103 unsigned long *out2, R9
104 unsigned long *out3, R10
105 unsigned long *out4); 112(R1)
106 */
107_GLOBAL(plpar_hcall_4out)
108 HMT_MEDIUM
109
110 mfcr r0
111 stw r0,8(r1)
112
113 std r8,STK_PARM(r8)(r1) /* Save out ptrs */
114 std r9,STK_PARM(r9)(r1)
115 std r10,STK_PARM(r10)(r1)
116
117 HVSC /* invoke the hypervisor */
118
119 lwz r0,8(r1)
120
121 ld r8,STK_PARM(r8)(r1) /* Fetch r4-r7 ret args */
122 ld r9,STK_PARM(r9)(r1)
123 ld r10,STK_PARM(r10)(r1)
124 ld r11,STK_PARM(r11)(r1)
125 std r4,0(r8)
126 std r5,0(r9)
127 std r6,0(r10)
128 std r7,0(r11)
129
130 mtcrf 0xff,r0
131 blr /* return r3 = status */
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
deleted file mode 100644
index d17f0108a032..000000000000
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ /dev/null
@@ -1,590 +0,0 @@
1/*
2 * arch/ppc64/kernel/pSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 *
6 * Rewrite, cleanup:
7 *
8 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
9 *
10 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/types.h>
31#include <linux/slab.h>
32#include <linux/mm.h>
33#include <linux/spinlock.h>
34#include <linux/string.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <asm/io.h>
38#include <asm/prom.h>
39#include <asm/rtas.h>
40#include <asm/ppcdebug.h>
41#include <asm/iommu.h>
42#include <asm/pci-bridge.h>
43#include <asm/machdep.h>
44#include <asm/abs_addr.h>
45#include <asm/plpar_wrappers.h>
46#include <asm/pSeries_reconfig.h>
47#include <asm/systemcfg.h>
48#include <asm/firmware.h>
49#include "pci.h"
50
51#define DBG(fmt...)
52
53extern int is_python(struct device_node *);
54
55static void tce_build_pSeries(struct iommu_table *tbl, long index,
56 long npages, unsigned long uaddr,
57 enum dma_data_direction direction)
58{
59 union tce_entry t;
60 union tce_entry *tp;
61
62 t.te_word = 0;
63	t.te_rdwr = 1;		/* Read allowed */
64
65 if (direction != DMA_TO_DEVICE)
66 t.te_pciwr = 1;
67
68 tp = ((union tce_entry *)tbl->it_base) + index;
69
70 while (npages--) {
71 /* can't move this out since we might cross LMB boundary */
72 t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
73
74 tp->te_word = t.te_word;
75
76 uaddr += PAGE_SIZE;
77 tp++;
78 }
79}
80
81
82static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
83{
84 union tce_entry t;
85 union tce_entry *tp;
86
87 t.te_word = 0;
88 tp = ((union tce_entry *)tbl->it_base) + index;
89
90 while (npages--) {
91 tp->te_word = t.te_word;
92
93 tp++;
94 }
95}
96
97
98static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
99 long npages, unsigned long uaddr,
100 enum dma_data_direction direction)
101{
102 u64 rc;
103 union tce_entry tce;
104
105 tce.te_word = 0;
106 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
107 tce.te_rdwr = 1;
108 if (direction != DMA_TO_DEVICE)
109 tce.te_pciwr = 1;
110
111 while (npages--) {
112 rc = plpar_tce_put((u64)tbl->it_index,
113 (u64)tcenum << 12,
114 tce.te_word );
115
116 if (rc && printk_ratelimit()) {
117 printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
118 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
119 printk("\ttcenum = 0x%lx\n", (u64)tcenum);
120 printk("\ttce val = 0x%lx\n", tce.te_word );
121 show_stack(current, (unsigned long *)__get_SP());
122 }
123
124 tcenum++;
125 tce.te_rpn++;
126 }
127}
128
129static DEFINE_PER_CPU(void *, tce_page) = NULL;
130
131static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
132 long npages, unsigned long uaddr,
133 enum dma_data_direction direction)
134{
135 u64 rc;
136 union tce_entry tce, *tcep;
137 long l, limit;
138
139 if (npages == 1)
140 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
141 direction);
142
143 tcep = __get_cpu_var(tce_page);
144
145 /* This is safe to do since interrupts are off when we're called
146 * from iommu_alloc{,_sg}()
147 */
148 if (!tcep) {
149 tcep = (void *)__get_free_page(GFP_ATOMIC);
150 /* If allocation fails, fall back to the loop implementation */
151 if (!tcep)
152 return tce_build_pSeriesLP(tbl, tcenum, npages,
153 uaddr, direction);
154 __get_cpu_var(tce_page) = tcep;
155 }
156
157 tce.te_word = 0;
158 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
159 tce.te_rdwr = 1;
160 if (direction != DMA_TO_DEVICE)
161 tce.te_pciwr = 1;
162
163 /* We can map max one pageful of TCEs at a time */
164 do {
165 /*
166 * Set up the page with TCE data, looping through and setting
167 * the values.
168 */
169 limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry));
170
171 for (l = 0; l < limit; l++) {
172 tcep[l] = tce;
173 tce.te_rpn++;
174 }
175
176 rc = plpar_tce_put_indirect((u64)tbl->it_index,
177 (u64)tcenum << 12,
178 (u64)virt_to_abs(tcep),
179 limit);
180
181 npages -= limit;
182 tcenum += limit;
183 } while (npages > 0 && !rc);
184
185 if (rc && printk_ratelimit()) {
186 printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
187 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
188 printk("\tnpages = 0x%lx\n", (u64)npages);
189 printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
190 show_stack(current, (unsigned long *)__get_SP());
191 }
192}
193
194static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
195{
196 u64 rc;
197 union tce_entry tce;
198
199 tce.te_word = 0;
200
201 while (npages--) {
202 rc = plpar_tce_put((u64)tbl->it_index,
203 (u64)tcenum << 12,
204 tce.te_word);
205
206 if (rc && printk_ratelimit()) {
207 printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
208 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
209 printk("\ttcenum = 0x%lx\n", (u64)tcenum);
210 printk("\ttce val = 0x%lx\n", tce.te_word );
211 show_stack(current, (unsigned long *)__get_SP());
212 }
213
214 tcenum++;
215 }
216}
217
218
219static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
220{
221 u64 rc;
222 union tce_entry tce;
223
224 tce.te_word = 0;
225
226 rc = plpar_tce_stuff((u64)tbl->it_index,
227 (u64)tcenum << 12,
228 tce.te_word,
229 npages);
230
231 if (rc && printk_ratelimit()) {
232 printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
233 printk("\trc = %ld\n", rc);
234 printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
235 printk("\tnpages = 0x%lx\n", (u64)npages);
236 printk("\ttce val = 0x%lx\n", tce.te_word );
237 show_stack(current, (unsigned long *)__get_SP());
238 }
239}
240
241static void iommu_table_setparms(struct pci_controller *phb,
242 struct device_node *dn,
243 struct iommu_table *tbl)
244{
245 struct device_node *node;
246 unsigned long *basep;
247 unsigned int *sizep;
248
249 node = (struct device_node *)phb->arch_data;
250
251 basep = (unsigned long *)get_property(node, "linux,tce-base", NULL);
252 sizep = (unsigned int *)get_property(node, "linux,tce-size", NULL);
253 if (basep == NULL || sizep == NULL) {
254		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s is "
255		       "missing tce entries!\n", dn->full_name);
256 return;
257 }
258
259 tbl->it_base = (unsigned long)__va(*basep);
260 memset((void *)tbl->it_base, 0, *sizep);
261
262 tbl->it_busno = phb->bus->number;
263
264 /* Units of tce entries */
265 tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
266
267 /* Test if we are going over 2GB of DMA space */
268 if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
269 udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
270 panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
271 }
272
273 phb->dma_window_base_cur += phb->dma_window_size;
274
275 /* Set the tce table size - measured in entries */
276 tbl->it_size = phb->dma_window_size >> PAGE_SHIFT;
277
278 tbl->it_index = 0;
279 tbl->it_blocksize = 16;
280 tbl->it_type = TCE_PCI;
281}
282
283/*
284 * iommu_table_setparms_lpar
285 *
286 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
287 *
288 * ToDo: properly interpret the ibm,dma-window property. The definition is:
289 * logical-bus-number (1 word)
290 * phys-address (#address-cells words)
291 * size (#cell-size words)
292 *
293 * Currently we hard code these sizes (more or less).
294 */
295static void iommu_table_setparms_lpar(struct pci_controller *phb,
296 struct device_node *dn,
297 struct iommu_table *tbl,
298 unsigned int *dma_window)
299{
300 tbl->it_busno = PCI_DN(dn)->bussubno;
301
302 /* TODO: Parse field size properties properly. */
303 tbl->it_size = (((unsigned long)dma_window[4] << 32) |
304 (unsigned long)dma_window[5]) >> PAGE_SHIFT;
305 tbl->it_offset = (((unsigned long)dma_window[2] << 32) |
306 (unsigned long)dma_window[3]) >> PAGE_SHIFT;
307 tbl->it_base = 0;
308 tbl->it_index = dma_window[0];
309 tbl->it_blocksize = 16;
310 tbl->it_type = TCE_PCI;
311}
312
313static void iommu_bus_setup_pSeries(struct pci_bus *bus)
314{
315 struct device_node *dn;
316 struct iommu_table *tbl;
317 struct device_node *isa_dn, *isa_dn_orig;
318 struct device_node *tmp;
319 struct pci_dn *pci;
320 int children;
321
322 DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
323
324 dn = pci_bus_to_OF_node(bus);
325 pci = PCI_DN(dn);
326
327 if (bus->self) {
328 /* This is not a root bus, any setup will be done for the
329 * device-side of the bridge in iommu_dev_setup_pSeries().
330 */
331 return;
332 }
333
334 /* Check if the ISA bus on the system is under
335 * this PHB.
336 */
337 isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");
338
339 while (isa_dn && isa_dn != dn)
340 isa_dn = isa_dn->parent;
341
342 if (isa_dn_orig)
343 of_node_put(isa_dn_orig);
344
345 /* Count number of direct PCI children of the PHB.
346 * All PCI device nodes have class-code property, so it's
347 * an easy way to find them.
348 */
349 for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
350 if (get_property(tmp, "class-code", NULL))
351 children++;
352
353 DBG("Children: %d\n", children);
354
355 /* Calculate amount of DMA window per slot. Each window must be
356 * a power of two (due to pci_alloc_consistent requirements).
357 *
358 * Keep 256MB aside for PHBs with ISA.
359 */
360
361 if (!isa_dn) {
362 /* No ISA/IDE - just set window size and return */
363 pci->phb->dma_window_size = 0x80000000ul; /* To be divided */
364
365 while (pci->phb->dma_window_size * children > 0x80000000ul)
366 pci->phb->dma_window_size >>= 1;
367 DBG("No ISA/IDE, window size is 0x%lx\n",
368 pci->phb->dma_window_size);
369 pci->phb->dma_window_base_cur = 0;
370
371 return;
372 }
373
374 /* If we have ISA, then we probably have an IDE
375 * controller too. Allocate a 128MB table but
376 * skip the first 128MB to avoid stepping on ISA
377 * space.
378 */
379 pci->phb->dma_window_size = 0x8000000ul;
380 pci->phb->dma_window_base_cur = 0x8000000ul;
381
382 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
383
384 iommu_table_setparms(pci->phb, dn, tbl);
385 pci->iommu_table = iommu_init_table(tbl);
386
387 /* Divide the rest (1.75GB) among the children */
388 pci->phb->dma_window_size = 0x80000000ul;
389 while (pci->phb->dma_window_size * children > 0x70000000ul)
390 pci->phb->dma_window_size >>= 1;
391
392 DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
393
394}
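
/* Worked example for the sizing loops above (illustrative numbers):
 * with 4 PCI children and no ISA, the 2GB window halves until
 * 4 * size <= 2GB, leaving each slot a 512MB (0x20000000) DMA window.
 */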
395
396
397static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
398{
399 struct iommu_table *tbl;
400 struct device_node *dn, *pdn;
401 struct pci_dn *ppci;
402 unsigned int *dma_window = NULL;
403
404 DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
405
406 dn = pci_bus_to_OF_node(bus);
407
408 /* Find nearest ibm,dma-window, walking up the device tree */
409 for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
410 dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL);
411 if (dma_window != NULL)
412 break;
413 }
414
415 if (dma_window == NULL) {
416 DBG("iommu_bus_setup_pSeriesLP: bus %s seems to have no ibm,dma-window property\n", dn->full_name);
417 return;
418 }
419
420 ppci = pdn->data;
421 if (!ppci->iommu_table) {
422 /* Bussubno hasn't been copied yet.
423 * Do it now because iommu_table_setparms_lpar needs it.
424 */
425
426 ppci->bussubno = bus->number;
427
428 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
429 GFP_KERNEL);
430
431 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
432
433 ppci->iommu_table = iommu_init_table(tbl);
434 }
435
436 if (pdn != dn)
437 PCI_DN(dn)->iommu_table = ppci->iommu_table;
438}
439
440
441static void iommu_dev_setup_pSeries(struct pci_dev *dev)
442{
443 struct device_node *dn, *mydn;
444 struct iommu_table *tbl;
445
446 DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev));
447
448 mydn = dn = pci_device_to_OF_node(dev);
449
450 /* If we're the direct child of a root bus, then we need to allocate
451 * an iommu table ourselves. The bus setup code should have setup
452 * the window sizes already.
453 */
454 if (!dev->bus->self) {
455 DBG(" --> first child, no bridge. Allocating iommu table.\n");
456 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
457 iommu_table_setparms(PCI_DN(dn)->phb, dn, tbl);
458 PCI_DN(mydn)->iommu_table = iommu_init_table(tbl);
459
460 return;
461 }
462
463 /* If this device is further down the bus tree, search upwards until
464 * an already allocated iommu table is found and use that.
465 */
466
467 while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
468 dn = dn->parent;
469
470 if (dn && dn->data) {
471 PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
472 } else {
473 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev));
474 }
475}
476
477static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
478{
479 int err = NOTIFY_OK;
480 struct device_node *np = node;
481 struct pci_dn *pci = np->data;
482
483 switch (action) {
484 case PSERIES_RECONFIG_REMOVE:
485 if (pci->iommu_table &&
486 get_property(np, "ibm,dma-window", NULL))
487 iommu_free_table(np);
488 break;
489 default:
490 err = NOTIFY_DONE;
491 break;
492 }
493 return err;
494}
495
496static struct notifier_block iommu_reconfig_nb = {
497 .notifier_call = iommu_reconfig_notifier,
498};
499
500static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
501{
502 struct device_node *pdn, *dn;
503 struct iommu_table *tbl;
504 int *dma_window = NULL;
505 struct pci_dn *pci;
506
507 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
508
509 /* dev setup for LPAR is a little tricky, since the device tree might
510	 * contain the dma-window properties per-device and not necessarily
511 * for the bus. So we need to search upwards in the tree until we
512 * either hit a dma-window property, OR find a parent with a table
513 * already allocated.
514 */
515 dn = pci_device_to_OF_node(dev);
516
517 for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table;
518 pdn = pdn->parent) {
519 dma_window = (unsigned int *)
520 get_property(pdn, "ibm,dma-window", NULL);
521 if (dma_window)
522 break;
523 }
524
525 /* Check for parent == NULL so we don't try to setup the empty EADS
526 * slots on POWER4 machines.
527 */
528 if (dma_window == NULL || pdn->parent == NULL) {
529 DBG("No dma window for device, linking to parent\n");
530 PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table;
531 return;
532 } else {
533 DBG("Found DMA window, allocating table\n");
534 }
535
536 pci = pdn->data;
537 if (!pci->iommu_table) {
538 /* iommu_table_setparms_lpar needs bussubno. */
539 pci->bussubno = pci->phb->bus->number;
540
541 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
542 GFP_KERNEL);
543
544 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
545
546 pci->iommu_table = iommu_init_table(tbl);
547 }
548
549 if (pdn != dn)
550 PCI_DN(dn)->iommu_table = pci->iommu_table;
551}
552
553static void iommu_bus_setup_null(struct pci_bus *b) { }
554static void iommu_dev_setup_null(struct pci_dev *d) { }
555
556/* These are called very early. */
557void iommu_init_early_pSeries(void)
558{
559 if (of_chosen && get_property(of_chosen, "linux,iommu-off", NULL)) {
560 /* Direct I/O, IOMMU off */
561 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
562 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
563 pci_direct_iommu_init();
564
565 return;
566 }
567
568 if (systemcfg->platform & PLATFORM_LPAR) {
569 if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
570 ppc_md.tce_build = tce_buildmulti_pSeriesLP;
571 ppc_md.tce_free = tce_freemulti_pSeriesLP;
572 } else {
573 ppc_md.tce_build = tce_build_pSeriesLP;
574 ppc_md.tce_free = tce_free_pSeriesLP;
575 }
576 ppc_md.iommu_bus_setup = iommu_bus_setup_pSeriesLP;
577 ppc_md.iommu_dev_setup = iommu_dev_setup_pSeriesLP;
578 } else {
579 ppc_md.tce_build = tce_build_pSeries;
580 ppc_md.tce_free = tce_free_pSeries;
581 ppc_md.iommu_bus_setup = iommu_bus_setup_pSeries;
582 ppc_md.iommu_dev_setup = iommu_dev_setup_pSeries;
583 }
584
585
586 pSeries_reconfig_notifier_register(&iommu_reconfig_nb);
587
588 pci_iommu_init();
589}
590
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
deleted file mode 100644
index a6de83f2078f..000000000000
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ /dev/null
@@ -1,518 +0,0 @@
1/*
2 * pSeries_lpar.c
3 * Copyright (C) 2001 Todd Inglett, IBM Corporation
4 *
5 * pSeries LPAR support.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#define DEBUG
23
24#include <linux/config.h>
25#include <linux/kernel.h>
26#include <linux/dma-mapping.h>
27#include <asm/processor.h>
28#include <asm/mmu.h>
29#include <asm/page.h>
30#include <asm/pgtable.h>
31#include <asm/machdep.h>
32#include <asm/abs_addr.h>
33#include <asm/mmu_context.h>
34#include <asm/ppcdebug.h>
35#include <asm/iommu.h>
36#include <asm/tlbflush.h>
37#include <asm/tlb.h>
38#include <asm/prom.h>
39#include <asm/abs_addr.h>
40#include <asm/cputable.h>
41#include <asm/plpar_wrappers.h>
42
43#ifdef DEBUG
44#define DBG(fmt...) udbg_printf(fmt)
45#else
46#define DBG(fmt...)
47#endif
48
49/* in pSeries_hvCall.S */
50EXPORT_SYMBOL(plpar_hcall);
51EXPORT_SYMBOL(plpar_hcall_4out);
52EXPORT_SYMBOL(plpar_hcall_norets);
53EXPORT_SYMBOL(plpar_hcall_8arg_2ret);
54
55extern void pSeries_find_serial_port(void);
56
57
58int vtermno; /* virtual terminal# for udbg */
59
60#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
61static void udbg_hvsi_putc(unsigned char c)
62{
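	/* hvsi data packet, per the framing in drivers/char/hvsi.c: a
	 * type byte (0xff = data), a length byte (5 = whole packet),
	 * a 16-bit seqno, then the payload byte */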
63	/* packet's seqno isn't used anyway */
64 uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c };
65 int rc;
66
67 if (c == '\n')
68 udbg_hvsi_putc('\r');
69
70 do {
71 rc = plpar_put_term_char(vtermno, sizeof(packet), packet);
72 } while (rc == H_Busy);
73}
74
75static long hvsi_udbg_buf_len;
76static uint8_t hvsi_udbg_buf[256];
77
78static int udbg_hvsi_getc_poll(void)
79{
80 unsigned char ch;
81 int rc, i;
82
83 if (hvsi_udbg_buf_len == 0) {
84 rc = plpar_get_term_char(vtermno, &hvsi_udbg_buf_len, hvsi_udbg_buf);
85 if (rc != H_Success || hvsi_udbg_buf[0] != 0xff) {
86 /* bad read or non-data packet */
87 hvsi_udbg_buf_len = 0;
88 } else {
89 /* remove the packet header */
90 for (i = 4; i < hvsi_udbg_buf_len; i++)
91 hvsi_udbg_buf[i-4] = hvsi_udbg_buf[i];
92 hvsi_udbg_buf_len -= 4;
93 }
94 }
95
96 if (hvsi_udbg_buf_len <= 0 || hvsi_udbg_buf_len > 256) {
97 /* no data ready */
98 hvsi_udbg_buf_len = 0;
99 return -1;
100 }
101
102 ch = hvsi_udbg_buf[0];
103 /* shift remaining data down */
104 for (i = 1; i < hvsi_udbg_buf_len; i++) {
105 hvsi_udbg_buf[i-1] = hvsi_udbg_buf[i];
106 }
107 hvsi_udbg_buf_len--;
108
109 return ch;
110}
111
112static unsigned char udbg_hvsi_getc(void)
113{
114 int ch;
115 for (;;) {
116 ch = udbg_hvsi_getc_poll();
117 if (ch == -1) {
118 /* This shouldn't be needed...but... */
119 volatile unsigned long delay;
120 for (delay=0; delay < 2000000; delay++)
121 ;
122 } else {
123 return ch;
124 }
125 }
126}
127
128static void udbg_putcLP(unsigned char c)
129{
130 char buf[16];
131 unsigned long rc;
132
133 if (c == '\n')
134 udbg_putcLP('\r');
135
136 buf[0] = c;
137 do {
138 rc = plpar_put_term_char(vtermno, 1, buf);
139 } while(rc == H_Busy);
140}
141
142/* Buffered chars getc */
143static long inbuflen;
144static long inbuf[2]; /* must be 2 longs */
145
146static int udbg_getc_pollLP(void)
147{
148 /* The interface is tricky because it may return up to 16 chars.
149 * We save them statically for future calls to udbg_getc().
150 */
151 char ch, *buf = (char *)inbuf;
152 int i;
153 long rc;
154 if (inbuflen == 0) {
155 /* get some more chars. */
156 inbuflen = 0;
157 rc = plpar_get_term_char(vtermno, &inbuflen, buf);
158 if (rc != H_Success)
159 inbuflen = 0; /* otherwise inbuflen is garbage */
160 }
161 if (inbuflen <= 0 || inbuflen > 16) {
162 /* Catch error case as well as other oddities (corruption) */
163 inbuflen = 0;
164 return -1;
165 }
166 ch = buf[0];
167 for (i = 1; i < inbuflen; i++) /* shuffle them down. */
168 buf[i-1] = buf[i];
169 inbuflen--;
170 return ch;
171}
172
173static unsigned char udbg_getcLP(void)
174{
175 int ch;
176 for (;;) {
177 ch = udbg_getc_pollLP();
178 if (ch == -1) {
179 /* This shouldn't be needed...but... */
180 volatile unsigned long delay;
181 for (delay=0; delay < 2000000; delay++)
182 ;
183 } else {
184 return ch;
185 }
186 }
187}
188
189/* call this from early_init() for a working debug console on
190 * vterm capable LPAR machines
191 */
192void udbg_init_debug_lpar(void)
193{
194 vtermno = 0;
195 udbg_putc = udbg_putcLP;
196 udbg_getc = udbg_getcLP;
197 udbg_getc_poll = udbg_getc_pollLP;
198}
199
200/* returns 0 if couldn't find or use /chosen/stdout as console */
201int find_udbg_vterm(void)
202{
203 struct device_node *stdout_node;
204 u32 *termno;
205 char *name;
206 int found = 0;
207
208 /* find the boot console from /chosen/stdout */
209 if (!of_chosen)
210 return 0;
211 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
212 if (name == NULL)
213 return 0;
214 stdout_node = of_find_node_by_path(name);
215 if (!stdout_node)
216 return 0;
217
218 /* now we have the stdout node; figure out what type of device it is. */
219 name = (char *)get_property(stdout_node, "name", NULL);
220 if (!name) {
221 printk(KERN_WARNING "stdout node missing 'name' property!\n");
222 goto out;
223 }
224
225 if (strncmp(name, "vty", 3) == 0) {
226 if (device_is_compatible(stdout_node, "hvterm1")) {
227 termno = (u32 *)get_property(stdout_node, "reg", NULL);
228 if (termno) {
229 vtermno = termno[0];
230 udbg_putc = udbg_putcLP;
231 udbg_getc = udbg_getcLP;
232 udbg_getc_poll = udbg_getc_pollLP;
233 found = 1;
234 }
235 } else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
236 termno = (u32 *)get_property(stdout_node, "reg", NULL);
237 if (termno) {
238 vtermno = termno[0];
239 udbg_putc = udbg_hvsi_putc;
240 udbg_getc = udbg_hvsi_getc;
241 udbg_getc_poll = udbg_hvsi_getc_poll;
242 found = 1;
243 }
244 }
245	} else if (strncmp(name, "serial", 6) == 0) {
246 /* XXX fix ISA serial console */
247 printk(KERN_WARNING "serial stdout on LPAR ('%s')! "
248 "can't print udbg messages\n",
249 stdout_node->full_name);
250 } else {
251 printk(KERN_WARNING "don't know how to print to stdout '%s'\n",
252 stdout_node->full_name);
253 }
254
255out:
256 of_node_put(stdout_node);
257 return found;
258}
259
260void vpa_init(int cpu)
261{
262 int hwcpu = get_hard_smp_processor_id(cpu);
263 unsigned long vpa = (unsigned long)&(paca[cpu].lppaca);
264 long ret;
265 unsigned long flags;
266
267 /* Register the Virtual Processor Area (VPA) */
268 flags = 1UL << (63 - 18);
269
270 if (cpu_has_feature(CPU_FTR_ALTIVEC))
271 paca[cpu].lppaca.vmxregs_in_use = 1;
272
273 ret = register_vpa(flags, hwcpu, __pa(vpa));
274
275 if (ret)
276 printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
277 "cpu %d (hw %d) of area %lx returns %ld\n",
278 cpu, hwcpu, __pa(vpa), ret);
279}
280
281long pSeries_lpar_hpte_insert(unsigned long hpte_group,
282 unsigned long va, unsigned long prpn,
283 unsigned long vflags, unsigned long rflags)
284{
285 unsigned long lpar_rc;
286 unsigned long flags;
287 unsigned long slot;
288 unsigned long hpte_v, hpte_r;
289 unsigned long dummy0, dummy1;
290
291 hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
292 if (vflags & HPTE_V_LARGE)
293 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
294
295 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
296
297 /* Now fill in the actual HPTE */
298 /* Set CEC cookie to 0 */
299 /* Zero page = 0 */
300 /* I-cache Invalidate = 0 */
301 /* I-cache synchronize = 0 */
302 /* Exact = 0 */
303 flags = 0;
304
305 /* XXX why is this here? - Anton */
306 if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
307 hpte_r &= ~_PAGE_COHERENT;
308
309 lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
310 hpte_r, &slot, &dummy0, &dummy1);
311
312 if (unlikely(lpar_rc == H_PTEG_Full))
313 return -1;
314
315 /*
316 * Since we try and ioremap PHBs we don't own, the pte insert
317 * will fail. However we must catch the failure in hash_page
318 * or we will loop forever, so return -2 in this case.
319 */
320 if (unlikely(lpar_rc != H_Success))
321 return -2;
322
323 /* Because of iSeries, we have to pass down the secondary
324 * bucket bit here as well
325 */
326 return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
327}
328
329static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
330
331static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
332{
333 unsigned long slot_offset;
334 unsigned long lpar_rc;
335 int i;
336 unsigned long dummy1, dummy2;
337
338 /* pick a random slot to start at */
339 slot_offset = mftb() & 0x7;
340
341 for (i = 0; i < HPTES_PER_GROUP; i++) {
342
343 /* don't remove a bolted entry */
344 lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
345 (0x1UL << 4), &dummy1, &dummy2);
346
347 if (lpar_rc == H_Success)
348 return i;
349
350 BUG_ON(lpar_rc != H_Not_Found);
351
352 slot_offset++;
353 slot_offset &= 0x7;
354 }
355
356 return -1;
357}
358
359static void pSeries_lpar_hptab_clear(void)
360{
361 unsigned long size_bytes = 1UL << ppc64_pft_size;
362 unsigned long hpte_count = size_bytes >> 4;
363 unsigned long dummy1, dummy2;
364 int i;
365
366 /* TODO: Use bulk call */
367 for (i = 0; i < hpte_count; i++)
368 plpar_pte_remove(0, i, 0, &dummy1, &dummy2);
369}
370
371/*
372 * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
373 * the low 3 bits of flags happen to line up. So no transform is needed.
374 * We can probably optimize here and assume the high bits of newpp are
375 * already zero. For now I am paranoid.
376 */
377static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
378 unsigned long va, int large, int local)
379{
380 unsigned long lpar_rc;
381 unsigned long flags = (newpp & 7) | H_AVPN;
382 unsigned long avpn = va >> 23;
383
384 if (large)
385 avpn &= ~0x1UL;
386
387 lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));
388
389 if (lpar_rc == H_Not_Found)
390 return -1;
391
392 BUG_ON(lpar_rc != H_Success);
393
394 return 0;
395}
396
397static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
398{
399 unsigned long dword0;
400 unsigned long lpar_rc;
401 unsigned long dummy_word1;
402 unsigned long flags;
403
404 /* Read 1 pte at a time */
405 /* Do not need RPN to logical page translation */
406 /* No cross CEC PFT access */
407 flags = 0;
408
409 lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
410
411 BUG_ON(lpar_rc != H_Success);
412
413 return dword0;
414}
415
416static long pSeries_lpar_hpte_find(unsigned long vpn)
417{
418 unsigned long hash;
419 unsigned long i, j;
420 long slot;
421 unsigned long hpte_v;
422
423 hash = hpt_hash(vpn, 0);
424
425 for (j = 0; j < 2; j++) {
426 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
427 for (i = 0; i < HPTES_PER_GROUP; i++) {
428 hpte_v = pSeries_lpar_hpte_getword0(slot);
429
430 if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
431 && (hpte_v & HPTE_V_VALID)
432 && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
433 /* HPTE matches */
434 if (j)
435 slot = -slot;
436 return slot;
437 }
438 ++slot;
439 }
440 hash = ~hash;
441 }
442
443 return -1;
444}
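
/* Note on the search above: the j = 0 pass scans the primary hash
 * bucket, the j = 1 pass the secondary one (hash = ~hash), and a
 * negated slot value tells the caller the match came from the
 * secondary bucket.
 */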
445
446static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
447 unsigned long ea)
448{
449 unsigned long lpar_rc;
450 unsigned long vsid, va, vpn, flags;
451 long slot;
452
453 vsid = get_kernel_vsid(ea);
454 va = (vsid << 28) | (ea & 0x0fffffff);
455 vpn = va >> PAGE_SHIFT;
456
457 slot = pSeries_lpar_hpte_find(vpn);
458 BUG_ON(slot == -1);
459
460 flags = newpp & 7;
461 lpar_rc = plpar_pte_protect(flags, slot, 0);
462
463 BUG_ON(lpar_rc != H_Success);
464}
465
466static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
467 int large, int local)
468{
469 unsigned long avpn = va >> 23;
470 unsigned long lpar_rc;
471 unsigned long dummy1, dummy2;
472
473 if (large)
474 avpn &= ~0x1UL;
475
476 lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
477 &dummy2);
478
479 if (lpar_rc == H_Not_Found)
480 return;
481
482 BUG_ON(lpar_rc != H_Success);
483}
484
485/*
486 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
487 * lock.
488 */
489void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
490 int local)
491{
492 int i;
493 unsigned long flags = 0;
494 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
495 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
496
497 if (lock_tlbie)
498 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
499
500 for (i = 0; i < number; i++)
501 flush_hash_page(context, batch->addr[i], batch->pte[i], local);
502
503 if (lock_tlbie)
504 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
505}
506
507void hpte_init_lpar(void)
508{
509 ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
510 ppc_md.hpte_updatepp = pSeries_lpar_hpte_updatepp;
511 ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
512 ppc_md.hpte_insert = pSeries_lpar_hpte_insert;
513 ppc_md.hpte_remove = pSeries_lpar_hpte_remove;
514 ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range;
515 ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear;
516
517 htab_finish_init();
518}
diff --git a/arch/ppc64/kernel/pSeries_nvram.c b/arch/ppc64/kernel/pSeries_nvram.c
deleted file mode 100644
index 18abfb1f4e24..000000000000
--- a/arch/ppc64/kernel/pSeries_nvram.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /dev/nvram driver for PPC64
10 *
11 * This perhaps should live in drivers/char
12 */
13
14
15#include <linux/types.h>
16#include <linux/errno.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20#include <asm/uaccess.h>
21#include <asm/nvram.h>
22#include <asm/rtas.h>
23#include <asm/prom.h>
24#include <asm/machdep.h>
25
26static unsigned int nvram_size;
27static int nvram_fetch, nvram_store;
28static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
29static DEFINE_SPINLOCK(nvram_lock);
30
31
32static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
33{
34 unsigned int i;
35 unsigned long len;
36 int done;
37 unsigned long flags;
38 char *p = buf;
39
40
41 if (nvram_size == 0 || nvram_fetch == RTAS_UNKNOWN_SERVICE)
42 return -ENODEV;
43
44 if (*index >= nvram_size)
45 return 0;
46
47 i = *index;
48 if (i + count > nvram_size)
49 count = nvram_size - i;
50
51 spin_lock_irqsave(&nvram_lock, flags);
52
53 for (; count != 0; count -= len) {
54 len = count;
55 if (len > NVRW_CNT)
56 len = NVRW_CNT;
57
58 if ((rtas_call(nvram_fetch, 3, 2, &done, i, __pa(nvram_buf),
59 len) != 0) || len != done) {
60 spin_unlock_irqrestore(&nvram_lock, flags);
61 return -EIO;
62 }
63
64 memcpy(p, nvram_buf, len);
65
66 p += len;
67 i += len;
68 }
69
70 spin_unlock_irqrestore(&nvram_lock, flags);
71
72 *index = i;
73 return p - buf;
74}
75
76static ssize_t pSeries_nvram_write(char *buf, size_t count, loff_t *index)
77{
78 unsigned int i;
79 unsigned long len;
80 int done;
81 unsigned long flags;
82 const char *p = buf;
83
84 if (nvram_size == 0 || nvram_store == RTAS_UNKNOWN_SERVICE)
85 return -ENODEV;
86
87 if (*index >= nvram_size)
88 return 0;
89
90 i = *index;
91 if (i + count > nvram_size)
92 count = nvram_size - i;
93
94 spin_lock_irqsave(&nvram_lock, flags);
95
96 for (; count != 0; count -= len) {
97 len = count;
98 if (len > NVRW_CNT)
99 len = NVRW_CNT;
100
101 memcpy(nvram_buf, p, len);
102
103 if ((rtas_call(nvram_store, 3, 2, &done, i, __pa(nvram_buf),
104 len) != 0) || len != done) {
105 spin_unlock_irqrestore(&nvram_lock, flags);
106 return -EIO;
107 }
108
109 p += len;
110 i += len;
111 }
112 spin_unlock_irqrestore(&nvram_lock, flags);
113
114 *index = i;
115 return p - buf;
116}
117
118static ssize_t pSeries_nvram_get_size(void)
119{
120 return nvram_size ? nvram_size : -ENODEV;
121}
122
123int __init pSeries_nvram_init(void)
124{
125 struct device_node *nvram;
126 unsigned int *nbytes_p, proplen;
127
128 nvram = of_find_node_by_type(NULL, "nvram");
129 if (nvram == NULL)
130 return -ENODEV;
131
132 nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen);
133 if (nbytes_p == NULL || proplen != sizeof(unsigned int))
134 return -EIO;
135
136 nvram_size = *nbytes_p;
137
138 nvram_fetch = rtas_token("nvram-fetch");
139 nvram_store = rtas_token("nvram-store");
140 printk(KERN_INFO "PPC64 nvram contains %d bytes\n", nvram_size);
141 of_node_put(nvram);
142
143 ppc_md.nvram_read = pSeries_nvram_read;
144 ppc_md.nvram_write = pSeries_nvram_write;
145 ppc_md.nvram_size = pSeries_nvram_get_size;
146
147 return 0;
148}
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
deleted file mode 100644
index 928f8febdb3b..000000000000
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ /dev/null
@@ -1,143 +0,0 @@
1/*
2 * arch/ppc64/kernel/pSeries_pci.c
3 *
4 * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
5 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * pSeries specific routines for PCI.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/kernel.h>
27#include <linux/pci.h>
28#include <linux/string.h>
29
30#include <asm/pci-bridge.h>
31#include <asm/prom.h>
32
33#include "pci.h"
34
35static int __devinitdata s7a_workaround = -1;
36
37#if 0
38void pcibios_name_device(struct pci_dev *dev)
39{
40 struct device_node *dn;
41
42 /*
43 * Add IBM loc code (slot) as a prefix to the device names for service
44 */
45 dn = pci_device_to_OF_node(dev);
46 if (dn) {
47 char *loc_code = get_property(dn, "ibm,loc-code", 0);
48 if (loc_code) {
49 int loc_len = strlen(loc_code);
50 if (loc_len < sizeof(dev->dev.name)) {
51 memmove(dev->dev.name+loc_len+1, dev->dev.name,
52 sizeof(dev->dev.name)-loc_len-1);
53 memcpy(dev->dev.name, loc_code, loc_len);
54 dev->dev.name[loc_len] = ' ';
55 dev->dev.name[sizeof(dev->dev.name)-1] = '\0';
56 }
57 }
58 }
59}
60DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
61#endif
62
63static void __devinit check_s7a(void)
64{
65 struct device_node *root;
66 char *model;
67
68 s7a_workaround = 0;
69 root = of_find_node_by_path("/");
70 if (root) {
71 model = get_property(root, "model", NULL);
72 if (model && !strcmp(model, "IBM,7013-S7A"))
73 s7a_workaround = 1;
74 of_node_put(root);
75 }
76}
77
78void __devinit pSeries_irq_bus_setup(struct pci_bus *bus)
79{
80 struct pci_dev *dev;
81
82 if (s7a_workaround < 0)
83 check_s7a();
84 list_for_each_entry(dev, &bus->devices, bus_list) {
85 pci_read_irq_line(dev);
86 if (s7a_workaround) {
87 if (dev->irq > 16) {
88 dev->irq -= 3;
89 pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
90 dev->irq);
91 }
92 }
93 }
94}
95
96static void __init pSeries_request_regions(void)
97{
98 if (!isa_io_base)
99 return;
100
101 request_region(0x20,0x20,"pic1");
102 request_region(0xa0,0x20,"pic2");
103 request_region(0x00,0x20,"dma1");
104 request_region(0x40,0x20,"timer");
105 request_region(0x80,0x10,"dma page reg");
106 request_region(0xc0,0x20,"dma2");
107}
108
109void __init pSeries_final_fixup(void)
110{
111 phbs_remap_io();
112 pSeries_request_regions();
113
114 pci_addr_cache_build();
115}
116
117/*
118 * Assume the winbond 82c105 is the IDE controller on a
119 * p610. We should probably be more careful in case
120 * someone tries to plug in a similar adapter.
121 */
122static void fixup_winbond_82c105(struct pci_dev* dev)
123{
124 int i;
125 unsigned int reg;
126
127 if (!(systemcfg->platform & PLATFORM_PSERIES))
128 return;
129
130 printk("Using INTC for W82c105 IDE controller.\n");
131 pci_read_config_dword(dev, 0x40, &reg);
132 /* Enable LEGIRQ to use INTC instead of ISA interrupts */
133 pci_write_config_dword(dev, 0x40, reg | (1<<11));
134
135 for (i = 0; i < DEVICE_COUNT_RESOURCE; ++i) {
136 /* zap the 2nd function of the winbond chip */
137 if (dev->resource[i].flags & IORESOURCE_IO
138 && dev->bus->number == 0 && dev->devfn == 0x81)
139 dev->resource[i].flags &= ~IORESOURCE_IO;
140 }
141}
142DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
143 fixup_winbond_82c105);
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/ppc64/kernel/pSeries_reconfig.c
deleted file mode 100644
index 58c61219d08e..000000000000
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ /dev/null
@@ -1,426 +0,0 @@
1/*
2 * pSeries_reconfig.c - support for dynamic reconfiguration (including PCI
3 * Hotplug and Dynamic Logical Partitioning on RPA platforms).
4 *
5 * Copyright (C) 2005 Nathan Lynch
6 * Copyright (C) 2005 IBM Corporation
7 *
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version
11 * 2 as published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/kref.h>
16#include <linux/notifier.h>
17#include <linux/proc_fs.h>
18
19#include <asm/prom.h>
20#include <asm/pSeries_reconfig.h>
21#include <asm/uaccess.h>
22
23
24
25/*
26 * Routines for "runtime" addition and removal of device tree nodes.
27 */
28#ifdef CONFIG_PROC_DEVICETREE
29/*
30 * Add a node to /proc/device-tree.
31 */
32static void add_node_proc_entries(struct device_node *np)
33{
34 struct proc_dir_entry *ent;
35
36 ent = proc_mkdir(strrchr(np->full_name, '/') + 1, np->parent->pde);
37 if (ent)
38 proc_device_tree_add_node(np, ent);
39}
40
41static void remove_node_proc_entries(struct device_node *np)
42{
43 struct property *pp = np->properties;
44 struct device_node *parent = np->parent;
45
46 while (pp) {
47 remove_proc_entry(pp->name, np->pde);
48 pp = pp->next;
49 }
50 if (np->pde)
51 remove_proc_entry(np->pde->name, parent->pde);
52}
53#else /* !CONFIG_PROC_DEVICETREE */
54static void add_node_proc_entries(struct device_node *np)
55{
56 return;
57}
58
59static void remove_node_proc_entries(struct device_node *np)
60{
61 return;
62}
63#endif /* CONFIG_PROC_DEVICETREE */
64
65/**
66 * derive_parent - basically like dirname(1)
67 * @path: the full_name of a node to be added to the tree
68 *
69 * Returns the node which should be the parent of the node
70 * described by path. E.g., for path = "/foo/bar", returns
71 * the node with full_name = "/foo".
72 */
73static struct device_node *derive_parent(const char *path)
74{
75 struct device_node *parent = NULL;
76 char *parent_path = "/";
77 size_t parent_path_len = strrchr(path, '/') - path + 1;
78
79 /* reject if path is "/" */
80 if (!strcmp(path, "/"))
81 return ERR_PTR(-EINVAL);
82
83 if (strrchr(path, '/') != path) {
84 parent_path = kmalloc(parent_path_len, GFP_KERNEL);
85 if (!parent_path)
86 return ERR_PTR(-ENOMEM);
87 strlcpy(parent_path, path, parent_path_len);
88 }
89 parent = of_find_node_by_path(parent_path);
90 if (!parent)
91 return ERR_PTR(-EINVAL);
92 if (strcmp(parent_path, "/"))
93 kfree(parent_path);
94 return parent;
95}
96
97static struct notifier_block *pSeries_reconfig_chain;
98
99int pSeries_reconfig_notifier_register(struct notifier_block *nb)
100{
101 return notifier_chain_register(&pSeries_reconfig_chain, nb);
102}
103
104void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
105{
106 notifier_chain_unregister(&pSeries_reconfig_chain, nb);
107}
108
109static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
110{
111 struct device_node *np;
112 int err = -ENOMEM;
113
114 np = kzalloc(sizeof(*np), GFP_KERNEL);
115 if (!np)
116 goto out_err;
117
118 np->full_name = kmalloc(strlen(path) + 1, GFP_KERNEL);
119 if (!np->full_name)
120 goto out_err;
121
122 strcpy(np->full_name, path);
123
124 np->properties = proplist;
125 OF_MARK_DYNAMIC(np);
126 kref_init(&np->kref);
127
128 np->parent = derive_parent(path);
129 if (IS_ERR(np->parent)) {
130 err = PTR_ERR(np->parent);
131 goto out_err;
132 }
133
134 err = notifier_call_chain(&pSeries_reconfig_chain,
135 PSERIES_RECONFIG_ADD, np);
136 if (err == NOTIFY_BAD) {
137 printk(KERN_ERR "Failed to add device node %s\n", path);
138 err = -ENOMEM; /* For now, safe to assume kmalloc failure */
139 goto out_err;
140 }
141
142 of_attach_node(np);
143
144 add_node_proc_entries(np);
145
146 of_node_put(np->parent);
147
148 return 0;
149
150out_err:
151 if (np) {
152 of_node_put(np->parent);
153 kfree(np->full_name);
154 kfree(np);
155 }
156 return err;
157}
158
159static int pSeries_reconfig_remove_node(struct device_node *np)
160{
161 struct device_node *parent, *child;
162
163 parent = of_get_parent(np);
164 if (!parent)
165 return -EINVAL;
166
167 if ((child = of_get_next_child(np, NULL))) {
168 of_node_put(child);
169 return -EBUSY;
170 }
171
172 remove_node_proc_entries(np);
173
174 notifier_call_chain(&pSeries_reconfig_chain,
175 PSERIES_RECONFIG_REMOVE, np);
176 of_detach_node(np);
177
178 of_node_put(parent);
179 of_node_put(np); /* Must decrement the refcount */
180 return 0;
181}
182
183/*
184 * /proc/ppc64/ofdt - yucky binary interface for adding and removing
185 * OF device nodes. Should be deprecated as soon as we get an
186 * in-kernel wrapper for the RTAS ibm,configure-connector call.
187 */
188
189static void release_prop_list(const struct property *prop)
190{
191 struct property *next;
192 for (; prop; prop = next) {
193 next = prop->next;
194 kfree(prop->name);
195 kfree(prop->value);
196 kfree(prop);
197 }
198
199}
200
201/**
202 * parse_next_property - process the next property from raw input buffer
203 * @buf: input buffer, must be nul-terminated
204 * @end: end of the input buffer + 1, for validation
205 * @name: return value; set to property name in buf
206 * @length: return value; set to length of value
207 * @value: return value; set to the property value in buf
208 *
 209 * Note that the caller must make copies of the name and value returned;
210 * this function does no allocation or copying of the data. Return value
211 * is set to the next name in buf, or NULL on error.
212 */
213static char * parse_next_property(char *buf, char *end, char **name, int *length,
214 unsigned char **value)
215{
216 char *tmp;
217
218 *name = buf;
219
220 tmp = strchr(buf, ' ');
221 if (!tmp) {
222 printk(KERN_ERR "property parse failed in %s at line %d\n",
223 __FUNCTION__, __LINE__);
224 return NULL;
225 }
226 *tmp = '\0';
227
228 if (++tmp >= end) {
229 printk(KERN_ERR "property parse failed in %s at line %d\n",
230 __FUNCTION__, __LINE__);
231 return NULL;
232 }
233
234 /* now we're on the length */
235 *length = -1;
236 *length = simple_strtoul(tmp, &tmp, 10);
237 if (*length == -1) {
238 printk(KERN_ERR "property parse failed in %s at line %d\n",
239 __FUNCTION__, __LINE__);
240 return NULL;
241 }
242 if (*tmp != ' ' || ++tmp >= end) {
243 printk(KERN_ERR "property parse failed in %s at line %d\n",
244 __FUNCTION__, __LINE__);
245 return NULL;
246 }
247
248 /* now we're on the value */
249 *value = tmp;
250 tmp += *length;
251 if (tmp > end) {
252 printk(KERN_ERR "property parse failed in %s at line %d\n",
253 __FUNCTION__, __LINE__);
254 return NULL;
255 }
256 else if (tmp < end && *tmp != ' ' && *tmp != '\0') {
257 printk(KERN_ERR "property parse failed in %s at line %d\n",
258 __FUNCTION__, __LINE__);
259 return NULL;
260 }
261 tmp++;
262
263 /* and now we should be on the next name, or the end */
264 return tmp;
265}
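
[Editor's note] parse_next_property() expects each property as three space-separated fields, "name length value", with the next record starting right after the value; it NUL-terminates the name in place and returns a cursor just past the value, or NULL on malformed input. A self-contained sketch of the same cursor walk over a sample buffer (the property names and values below are made up for illustration, and the trailing-separator check is simplified):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Walk one "name length value" record; returns a cursor just past the
 * value, or NULL on malformed input, like parse_next_property(). */
static char *next_prop(char *buf, char *end, char **name, int *len, char **val)
{
        char *tmp = strchr(buf, ' ');

        if (!tmp)
                return NULL;
        *name = buf;
        *tmp = '\0';                    /* NUL-terminate the name in place */
        if (++tmp >= end)
                return NULL;
        *len = (int)strtoul(tmp, &tmp, 10);
        if (*tmp != ' ' || ++tmp >= end)
                return NULL;
        *val = tmp;
        tmp += *len;
        if (tmp > end)
                return NULL;
        return tmp + 1;                 /* skip separator to the next name */
}

int main(void)
{
        char buf[] = "compatible 8 ibm,demo reg 4 abcd";  /* made-up input */
        char *end = buf + sizeof(buf), *cur = buf, *name, *val;
        int len;

        while (cur < end && (cur = next_prop(cur, end, &name, &len, &val)))
                printf("%s = %.*s\n", name, len, val);
        return 0;
}

do_add_node() below feeds this parser everything after the node path, so a hypothetical write of "add_node /vdevice/demo compatible 8 ibm,demo" to /proc/ppc64/ofdt would create one node carrying a single property.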
266
267static struct property *new_property(const char *name, const int length,
268 const unsigned char *value, struct property *last)
269{
270 struct property *new = kmalloc(sizeof(*new), GFP_KERNEL);
271
272 if (!new)
273 return NULL;
274 memset(new, 0, sizeof(*new));
275
276 if (!(new->name = kmalloc(strlen(name) + 1, GFP_KERNEL)))
277 goto cleanup;
278 if (!(new->value = kmalloc(length + 1, GFP_KERNEL)))
279 goto cleanup;
280
281 strcpy(new->name, name);
282 memcpy(new->value, value, length);
283 *(((char *)new->value) + length) = 0;
284 new->length = length;
285 new->next = last;
286 return new;
287
288cleanup:
289 if (new->name)
290 kfree(new->name);
291 if (new->value)
292 kfree(new->value);
293 kfree(new);
294 return NULL;
295}
296
297static int do_add_node(char *buf, size_t bufsize)
298{
299 char *path, *end, *name;
300 struct device_node *np;
301 struct property *prop = NULL;
302 unsigned char* value;
303 int length, rv = 0;
304
305 end = buf + bufsize;
306 path = buf;
307 buf = strchr(buf, ' ');
308 if (!buf)
309 return -EINVAL;
310 *buf = '\0';
311 buf++;
312
313 if ((np = of_find_node_by_path(path))) {
314 of_node_put(np);
315 return -EINVAL;
316 }
317
318 /* rv = build_prop_list(tmp, bufsize - (tmp - buf), &proplist); */
319 while (buf < end &&
320 (buf = parse_next_property(buf, end, &name, &length, &value))) {
321 struct property *last = prop;
322
323 prop = new_property(name, length, value, last);
324 if (!prop) {
325 rv = -ENOMEM;
326 prop = last;
327 goto out;
328 }
329 }
330 if (!buf) {
331 rv = -EINVAL;
332 goto out;
333 }
334
335 rv = pSeries_reconfig_add_node(path, prop);
336
337out:
338 if (rv)
339 release_prop_list(prop);
340 return rv;
341}
342
343static int do_remove_node(char *buf)
344{
345 struct device_node *node;
346 int rv = -ENODEV;
347
348 if ((node = of_find_node_by_path(buf)))
349 rv = pSeries_reconfig_remove_node(node);
350
351 of_node_put(node);
352 return rv;
353}
354
355/**
356 * ofdt_write - perform operations on the Open Firmware device tree
357 *
358 * @file: not used
359 * @buf: command and arguments
360 * @count: size of the command buffer
361 * @off: not used
362 *
363 * Operations supported at this time are addition and removal of
364 * whole nodes along with their properties. Operations on individual
365 * properties are not implemented (yet).
366 */
367static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t count,
368 loff_t *off)
369{
370 int rv = 0;
371 char *kbuf;
372 char *tmp;
373
374 if (!(kbuf = kmalloc(count + 1, GFP_KERNEL))) {
375 rv = -ENOMEM;
376 goto out;
377 }
378 if (copy_from_user(kbuf, buf, count)) {
379 rv = -EFAULT;
380 goto out;
381 }
382
383 kbuf[count] = '\0';
384
385 tmp = strchr(kbuf, ' ');
386 if (!tmp) {
387 rv = -EINVAL;
388 goto out;
389 }
390 *tmp = '\0';
391 tmp++;
392
393 if (!strcmp(kbuf, "add_node"))
394 rv = do_add_node(tmp, count - (tmp - kbuf));
395 else if (!strcmp(kbuf, "remove_node"))
396 rv = do_remove_node(tmp);
397 else
398 rv = -EINVAL;
399out:
400 kfree(kbuf);
401 return rv ? rv : count;
402}
403
404static struct file_operations ofdt_fops = {
405 .write = ofdt_write
406};
407
408/* create /proc/ppc64/ofdt write-only by root */
409static int proc_ppc64_create_ofdt(void)
410{
411 struct proc_dir_entry *ent;
412
413 if (!(systemcfg->platform & PLATFORM_PSERIES))
414 return 0;
415
416 ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
417 if (ent) {
418 ent->nlink = 1;
419 ent->data = NULL;
420 ent->size = 0;
421 ent->proc_fops = &ofdt_fops;
422 }
423
424 return 0;
425}
426__initcall(proc_ppc64_create_ofdt);
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
deleted file mode 100644
index 3009701eb90d..000000000000
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ /dev/null
@@ -1,622 +0,0 @@
1/*
2 * linux/arch/ppc/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 * Modified by PPC64 Team, IBM Corp
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15/*
16 * bootup setup stuff..
17 */
18
19#undef DEBUG
20
21#include <linux/config.h>
22#include <linux/cpu.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/kernel.h>
26#include <linux/mm.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/slab.h>
30#include <linux/user.h>
31#include <linux/a.out.h>
32#include <linux/tty.h>
33#include <linux/major.h>
34#include <linux/interrupt.h>
35#include <linux/reboot.h>
36#include <linux/init.h>
37#include <linux/ioport.h>
38#include <linux/console.h>
39#include <linux/pci.h>
40#include <linux/utsname.h>
41#include <linux/adb.h>
42#include <linux/module.h>
43#include <linux/delay.h>
44#include <linux/irq.h>
45#include <linux/seq_file.h>
46#include <linux/root_dev.h>
47
48#include <asm/mmu.h>
49#include <asm/processor.h>
50#include <asm/io.h>
51#include <asm/pgtable.h>
52#include <asm/prom.h>
53#include <asm/rtas.h>
54#include <asm/pci-bridge.h>
55#include <asm/iommu.h>
56#include <asm/dma.h>
57#include <asm/machdep.h>
58#include <asm/irq.h>
59#include <asm/time.h>
60#include <asm/nvram.h>
61#include <asm/plpar_wrappers.h>
62#include <asm/xics.h>
63#include <asm/firmware.h>
64#include <asm/pmc.h>
65
66#include "i8259.h"
67#include "mpic.h"
68#include "pci.h"
69
70#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt)
72#else
73#define DBG(fmt...)
74#endif
75
76extern void find_udbg_vterm(void);
77extern void system_reset_fwnmi(void); /* from head.S */
78extern void machine_check_fwnmi(void); /* from head.S */
79extern void generic_find_legacy_serial_ports(u64 *physport,
80 unsigned int *default_speed);
81
82int fwnmi_active; /* TRUE if an FWNMI handler is present */
83
84extern void pSeries_system_reset_exception(struct pt_regs *regs);
85extern int pSeries_machine_check_exception(struct pt_regs *regs);
86
87static int pseries_shared_idle(void);
88static int pseries_dedicated_idle(void);
89
90static volatile void __iomem * chrp_int_ack_special;
91struct mpic *pSeries_mpic;
92
93void pSeries_get_cpuinfo(struct seq_file *m)
94{
95 struct device_node *root;
96 const char *model = "";
97
98 root = of_find_node_by_path("/");
99 if (root)
100 model = get_property(root, "model", NULL);
101 seq_printf(m, "machine\t\t: CHRP %s\n", model);
102 of_node_put(root);
103}
104
 105/* Initialize firmware-assisted non-maskable interrupts if
106 * the firmware supports this feature.
107 *
108 */
109static void __init fwnmi_init(void)
110{
111 int ret;
112 int ibm_nmi_register = rtas_token("ibm,nmi-register");
113 if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
114 return;
115 ret = rtas_call(ibm_nmi_register, 2, 1, NULL,
116 __pa((unsigned long)system_reset_fwnmi),
117 __pa((unsigned long)machine_check_fwnmi));
118 if (ret == 0)
119 fwnmi_active = 1;
120}
121
122static int pSeries_irq_cascade(struct pt_regs *regs, void *data)
123{
124 if (chrp_int_ack_special)
125 return readb(chrp_int_ack_special);
126 else
127 return i8259_irq(smp_processor_id());
128}
129
130static void __init pSeries_init_mpic(void)
131{
132 unsigned int *addrp;
133 struct device_node *np;
134 int i;
135
136 /* All ISUs are setup, complete initialization */
137 mpic_init(pSeries_mpic);
138
139 /* Check what kind of cascade ACK we have */
140 if (!(np = of_find_node_by_name(NULL, "pci"))
141 || !(addrp = (unsigned int *)
142 get_property(np, "8259-interrupt-acknowledge", NULL)))
143 printk(KERN_ERR "Cannot find pci to get ack address\n");
144 else
145 chrp_int_ack_special = ioremap(addrp[prom_n_addr_cells(np)-1], 1);
146 of_node_put(np);
147
148 /* Setup the legacy interrupts & controller */
149 for (i = 0; i < NUM_ISA_INTERRUPTS; i++)
150 irq_desc[i].handler = &i8259_pic;
151 i8259_init(0);
152
153 /* Hook cascade to mpic */
154 mpic_setup_cascade(NUM_ISA_INTERRUPTS, pSeries_irq_cascade, NULL);
155}
156
157static void __init pSeries_setup_mpic(void)
158{
159 unsigned int *opprop;
160 unsigned long openpic_addr = 0;
161 unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
162 struct device_node *root;
163 int irq_count;
164
165 /* Find the Open PIC if present */
166 root = of_find_node_by_path("/");
167 opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
168 if (opprop != 0) {
169 int n = prom_n_addr_cells(root);
170
171 for (openpic_addr = 0; n > 0; --n)
172 openpic_addr = (openpic_addr << 32) + *opprop++;
173 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
174 }
175 of_node_put(root);
176
177 BUG_ON(openpic_addr == 0);
178
179 /* Get the sense values from OF */
180 prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);
181
182 /* Setup the openpic driver */
183 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
184 pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
185 16, 16, irq_count, /* isu size, irq offset, irq count */
186 NR_IRQS - 4, /* ipi offset */
187 senses, irq_count, /* sense & sense size */
188 " MPIC ");
189}
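
[Editor's note] The "platform-open-pic" property encodes the OpenPIC base address as #address-cells 32-bit cells, most-significant first, which is why the loop above folds them into a single 64-bit value one cell at a time. A small sketch of that folding (the cell values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Fold n 32-bit OF cells, most-significant first, into one 64-bit
 * address, as the loop in pSeries_setup_mpic() does. */
static uint64_t fold_cells(const uint32_t *cells, int n)
{
        uint64_t addr = 0;

        while (n-- > 0)
                addr = (addr << 32) + *cells++;
        return addr;
}

int main(void)
{
        uint32_t prop[2] = { 0x00000003, 0xfe000000 };  /* hypothetical cells */

        printf("OpenPIC addr: %llx\n",
               (unsigned long long)fold_cells(prop, 2));
        return 0;
}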
190
191static void pseries_lpar_enable_pmcs(void)
192{
193 unsigned long set, reset;
194
195 power4_enable_pmcs();
196
197 set = 1UL << 63;
198 reset = 0;
199 plpar_hcall_norets(H_PERFMON, set, reset);
200
201 /* instruct hypervisor to maintain PMCs */
202 if (firmware_has_feature(FW_FEATURE_SPLPAR))
203 get_paca()->lppaca.pmcregs_in_use = 1;
204}
205
206static void __init pSeries_setup_arch(void)
207{
208 /* Fixup ppc_md depending on the type of interrupt controller */
209 if (ppc64_interrupt_controller == IC_OPEN_PIC) {
210 ppc_md.init_IRQ = pSeries_init_mpic;
211 ppc_md.get_irq = mpic_get_irq;
212 ppc_md.cpu_irq_down = mpic_teardown_this_cpu;
213 /* Allocate the mpic now, so that find_and_init_phbs() can
214 * fill the ISUs */
215 pSeries_setup_mpic();
216 } else {
217 ppc_md.init_IRQ = xics_init_IRQ;
218 ppc_md.get_irq = xics_get_irq;
219 ppc_md.cpu_irq_down = xics_teardown_cpu;
220 }
221
222#ifdef CONFIG_SMP
223 smp_init_pSeries();
224#endif
225 /* openpic global configuration register (64-bit format). */
226 /* openpic Interrupt Source Unit pointer (64-bit format). */
227 /* python0 facility area (mmio) (64-bit format) REAL address. */
228
229 /* init to some ~sane value until calibrate_delay() runs */
230 loops_per_jiffy = 50000000;
231
232 if (ROOT_DEV == 0) {
233 printk("No ramdisk, default root is /dev/sda2\n");
234 ROOT_DEV = Root_SDA2;
235 }
236
237 fwnmi_init();
238
239 /* Find and initialize PCI host bridges */
240 init_pci_config_tokens();
241 find_and_init_phbs();
242 eeh_init();
243
244#ifdef CONFIG_DUMMY_CONSOLE
245 conswitchp = &dummy_con;
246#endif
247
248 pSeries_nvram_init();
249
250 /* Choose an idle loop */
251 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
252 vpa_init(boot_cpuid);
253 if (get_paca()->lppaca.shared_proc) {
254 printk(KERN_INFO "Using shared processor idle loop\n");
255 ppc_md.idle_loop = pseries_shared_idle;
256 } else {
257 printk(KERN_INFO "Using dedicated idle loop\n");
258 ppc_md.idle_loop = pseries_dedicated_idle;
259 }
260 } else {
261 printk(KERN_INFO "Using default idle loop\n");
262 ppc_md.idle_loop = default_idle;
263 }
264
265 if (systemcfg->platform & PLATFORM_LPAR)
266 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
267 else
268 ppc_md.enable_pmcs = power4_enable_pmcs;
269}
270
271static int __init pSeries_init_panel(void)
272{
273 /* Manually leave the kernel version on the panel. */
274 ppc_md.progress("Linux ppc64\n", 0);
275 ppc_md.progress(system_utsname.version, 0);
276
277 return 0;
278}
279arch_initcall(pSeries_init_panel);
280
281
282/* Build up the ppc64_firmware_features bitmask field
283 * using contents of device-tree/ibm,hypertas-functions.
284 * Ultimately this functionality may be moved into prom.c prom_init().
285 */
286static void __init fw_feature_init(void)
287{
288 struct device_node * dn;
289 char * hypertas;
290 unsigned int len;
291
292 DBG(" -> fw_feature_init()\n");
293
294 ppc64_firmware_features = 0;
295 dn = of_find_node_by_path("/rtas");
296 if (dn == NULL) {
297 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n");
298 goto no_rtas;
299 }
300
301 hypertas = get_property(dn, "ibm,hypertas-functions", &len);
302 if (hypertas) {
303 while (len > 0){
304 int i, hypertas_len;
305 /* check value against table of strings */
306 for(i=0; i < FIRMWARE_MAX_FEATURES ;i++) {
307 if ((firmware_features_table[i].name) &&
308 (strcmp(firmware_features_table[i].name,hypertas))==0) {
309 /* we have a match */
310 ppc64_firmware_features |=
311 (firmware_features_table[i].val);
312 break;
313 }
314 }
315 hypertas_len = strlen(hypertas);
316 len -= hypertas_len +1;
317 hypertas+= hypertas_len +1;
318 }
319 }
320
321 of_node_put(dn);
322 no_rtas:
323 printk(KERN_INFO "firmware_features = 0x%lx\n",
324 ppc64_firmware_features);
325
326 DBG(" <- fw_feature_init()\n");
327}
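
[Editor's note] "ibm,hypertas-functions" is a single property holding many NUL-terminated strings packed back to back, so the loop above consumes strlen()+1 bytes per entry until len runs out, OR-ing in the feature bit for every table match. A self-contained sketch of the same walk (the feature table and bit values here are invented for illustration):

#include <stdio.h>
#include <string.h>

struct feature { const char *name; unsigned long val; };

/* Hypothetical stand-in for firmware_features_table. */
static const struct feature table[] = {
        { "hcall-pft",    1UL << 0 },
        { "hcall-splpar", 1UL << 1 },
};

static unsigned long scan_features(const char *prop, int len)
{
        unsigned long feats = 0;

        while (len > 0) {
                size_t i, slen = strlen(prop);

                for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                        if (strcmp(table[i].name, prop) == 0) {
                                feats |= table[i].val;
                                break;
                        }
                len  -= slen + 1;       /* skip the string and its NUL */
                prop += slen + 1;
        }
        return feats;
}

int main(void)
{
        /* Two packed entries, as OF would encode them. */
        const char prop[] = "hcall-pft\0hcall-splpar";

        printf("features = 0x%lx\n", scan_features(prop, sizeof(prop)));
        return 0;
}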
328
329
330static void __init pSeries_discover_pic(void)
331{
332 struct device_node *np;
333 char *typep;
334
335 /*
336 * Setup interrupt mapping options that are needed for finish_device_tree
337 * to properly parse the OF interrupt tree & do the virtual irq mapping
338 */
339 __irq_offset_value = NUM_ISA_INTERRUPTS;
340 ppc64_interrupt_controller = IC_INVALID;
341 for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
342 typep = (char *)get_property(np, "compatible", NULL);
343 if (strstr(typep, "open-pic"))
344 ppc64_interrupt_controller = IC_OPEN_PIC;
345 else if (strstr(typep, "ppc-xicp"))
346 ppc64_interrupt_controller = IC_PPC_XIC;
347 else
348 printk("pSeries_discover_pic: failed to recognize"
349 " interrupt-controller\n");
350 break;
351 }
352}
353
354static void pSeries_mach_cpu_die(void)
355{
356 local_irq_disable();
357 idle_task_exit();
 358 /* Some hardware requires clearing the CPPR, while other hardware does not;
 359 * it is safe either way.
360 */
361 pSeriesLP_cppr_info(0, 0);
362 rtas_stop_self();
363 /* Should never get here... */
364 BUG();
365 for(;;);
366}
367
368
369/*
370 * Early initialization. Relocation is on but do not reference unbolted pages
371 */
372static void __init pSeries_init_early(void)
373{
374 void *comport;
375 int iommu_off = 0;
376 unsigned int default_speed;
377 u64 physport;
378
379 DBG(" -> pSeries_init_early()\n");
380
381 fw_feature_init();
382
383 if (systemcfg->platform & PLATFORM_LPAR)
384 hpte_init_lpar();
385 else {
386 hpte_init_native();
387 iommu_off = (of_chosen &&
388 get_property(of_chosen, "linux,iommu-off", NULL));
389 }
390
391 generic_find_legacy_serial_ports(&physport, &default_speed);
392
393 if (systemcfg->platform & PLATFORM_LPAR)
394 find_udbg_vterm();
395 else if (physport) {
396 /* Map the uart for udbg. */
397 comport = (void *)ioremap(physport, 16);
398 udbg_init_uart(comport, default_speed);
399
400 DBG("Hello World !\n");
401 }
402
403
404 iommu_init_early_pSeries();
405
406 pSeries_discover_pic();
407
408 DBG(" <- pSeries_init_early()\n");
409}
410
411
412static int pSeries_check_legacy_ioport(unsigned int baseport)
413{
414 struct device_node *np;
415
416#define I8042_DATA_REG 0x60
417#define FDC_BASE 0x3f0
418
419
420 switch(baseport) {
421 case I8042_DATA_REG:
422 np = of_find_node_by_type(NULL, "8042");
423 if (np == NULL)
424 return -ENODEV;
425 of_node_put(np);
426 break;
427 case FDC_BASE:
428 np = of_find_node_by_type(NULL, "fdc");
429 if (np == NULL)
430 return -ENODEV;
431 of_node_put(np);
432 break;
433 }
434 return 0;
435}
436
437/*
438 * Called very early, MMU is off, device-tree isn't unflattened
439 */
440extern struct machdep_calls pSeries_md;
441
442static int __init pSeries_probe(int platform)
443{
444 if (platform != PLATFORM_PSERIES &&
445 platform != PLATFORM_PSERIES_LPAR)
446 return 0;
447
448 /* if we have some ppc_md fixups for LPAR to do, do
449 * it here ...
450 */
451
452 return 1;
453}
454
455DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
456
457static inline void dedicated_idle_sleep(unsigned int cpu)
458{
459 struct paca_struct *ppaca = &paca[cpu ^ 1];
460
461 /* Only sleep if the other thread is not idle */
462 if (!(ppaca->lppaca.idle)) {
463 local_irq_disable();
464
465 /*
 466 * We are about to sleep the thread and so won't be polling any
467 * more.
468 */
469 clear_thread_flag(TIF_POLLING_NRFLAG);
470
471 /*
472 * SMT dynamic mode. Cede will result in this thread going
473 * dormant, if the partner thread is still doing work. Thread
474 * wakes up if partner goes idle, an interrupt is presented, or
475 * a prod occurs. Returning from the cede enables external
476 * interrupts.
477 */
478 if (!need_resched())
479 cede_processor();
480 else
481 local_irq_enable();
482 } else {
483 /*
484 * Give the HV an opportunity at the processor, since we are
485 * not doing any work.
486 */
487 poll_pending();
488 }
489}
490
491static int pseries_dedicated_idle(void)
492{
493 long oldval;
494 struct paca_struct *lpaca = get_paca();
495 unsigned int cpu = smp_processor_id();
496 unsigned long start_snooze;
497 unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
498
499 while (1) {
500 /*
501 * Indicate to the HV that we are idle. Now would be
502 * a good time to find other work to dispatch.
503 */
504 lpaca->lppaca.idle = 1;
505
506 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
507 if (!oldval) {
508 set_thread_flag(TIF_POLLING_NRFLAG);
509
510 start_snooze = __get_tb() +
511 *smt_snooze_delay * tb_ticks_per_usec;
512
513 while (!need_resched() && !cpu_is_offline(cpu)) {
514 ppc64_runlatch_off();
515
516 /*
517 * Go into low thread priority and possibly
518 * low power mode.
519 */
520 HMT_low();
521 HMT_very_low();
522
523 if (*smt_snooze_delay != 0 &&
524 __get_tb() > start_snooze) {
525 HMT_medium();
526 dedicated_idle_sleep(cpu);
527 }
528
529 }
530
531 HMT_medium();
532 clear_thread_flag(TIF_POLLING_NRFLAG);
533 } else {
534 set_need_resched();
535 }
536
537 lpaca->lppaca.idle = 0;
538 ppc64_runlatch_on();
539
540 schedule();
541
542 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
543 cpu_die();
544 }
545}
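
[Editor's note] The dedicated idle loop is a two-phase policy: it first spins cheaply at low thread priority (HMT_low()/HMT_very_low()) for up to smt_snooze_delay timebase ticks, and only past that deadline hands the thread to the hypervisor via dedicated_idle_sleep(), so short idle periods never pay for a cede/prod round trip. A schematic, runnable sketch of the policy with fake stand-ins for the timebase and hypervisor primitives:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t tb;                                  /* fake timebase */
static uint64_t read_timebase(void) { return ++tb; }
static bool work_pending(void)       { return tb > 500; } /* work comes later */
static void cpu_relax(void)          { }             /* HMT_low()/HMT_very_low() */
static void cede_to_hypervisor(void) { puts("ceding to hypervisor"); }

/* One pass of the two-phase policy in pseries_dedicated_idle():
 * poll until the snooze deadline, then give the thread away. */
static void idle_once(uint64_t snooze_ticks)
{
        uint64_t deadline = read_timebase() + snooze_ticks;

        while (!work_pending()) {
                cpu_relax();                         /* phase 1: cheap spin */
                if (snooze_ticks && read_timebase() > deadline) {
                        cede_to_hypervisor();        /* phase 2: sleep thread */
                        break;
                }
        }
}

int main(void)
{
        idle_once(100);                              /* ~smt_snooze_delay ticks */
        return 0;
}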
546
547static int pseries_shared_idle(void)
548{
549 struct paca_struct *lpaca = get_paca();
550 unsigned int cpu = smp_processor_id();
551
552 while (1) {
553 /*
554 * Indicate to the HV that we are idle. Now would be
555 * a good time to find other work to dispatch.
556 */
557 lpaca->lppaca.idle = 1;
558
559 while (!need_resched() && !cpu_is_offline(cpu)) {
560 local_irq_disable();
561 ppc64_runlatch_off();
562
563 /*
564 * Yield the processor to the hypervisor. We return if
565 * an external interrupt occurs (which are driven prior
566 * to returning here) or if a prod occurs from another
567 * processor. When returning here, external interrupts
568 * are enabled.
569 *
570 * Check need_resched() again with interrupts disabled
571 * to avoid a race.
572 */
573 if (!need_resched())
574 cede_processor();
575 else
576 local_irq_enable();
577
578 HMT_medium();
579 }
580
581 lpaca->lppaca.idle = 0;
582 ppc64_runlatch_on();
583
584 schedule();
585
586 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
587 cpu_die();
588 }
589
590 return 0;
591}
592
593static int pSeries_pci_probe_mode(struct pci_bus *bus)
594{
595 if (systemcfg->platform & PLATFORM_LPAR)
596 return PCI_PROBE_DEVTREE;
597 return PCI_PROBE_NORMAL;
598}
599
600struct machdep_calls __initdata pSeries_md = {
601 .probe = pSeries_probe,
602 .setup_arch = pSeries_setup_arch,
603 .init_early = pSeries_init_early,
604 .get_cpuinfo = pSeries_get_cpuinfo,
605 .log_error = pSeries_log_error,
606 .pcibios_fixup = pSeries_final_fixup,
607 .pci_probe_mode = pSeries_pci_probe_mode,
608 .irq_bus_setup = pSeries_irq_bus_setup,
609 .restart = rtas_restart,
610 .power_off = rtas_power_off,
611 .halt = rtas_halt,
612 .panic = rtas_os_term,
613 .cpu_die = pSeries_mach_cpu_die,
614 .get_boot_time = rtas_get_boot_time,
615 .get_rtc_time = rtas_get_rtc_time,
616 .set_rtc_time = rtas_set_rtc_time,
617 .calibrate_decr = generic_calibrate_decr,
618 .progress = rtas_progress,
619 .check_legacy_ioport = pSeries_check_legacy_ioport,
620 .system_reset_exception = pSeries_system_reset_exception,
621 .machine_check_exception = pSeries_machine_check_exception,
622};
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
deleted file mode 100644
index d2c7e2c4733b..000000000000
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ /dev/null
@@ -1,517 +0,0 @@
1/*
2 * SMP support for pSeries and BPA machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/interrupt.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/sysdev.h>
29#include <linux/cpu.h>
30
31#include <asm/ptrace.h>
32#include <asm/atomic.h>
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/smp.h>
39#include <asm/paca.h>
40#include <asm/time.h>
41#include <asm/machdep.h>
42#include <asm/xics.h>
43#include <asm/cputable.h>
44#include <asm/firmware.h>
45#include <asm/system.h>
46#include <asm/rtas.h>
47#include <asm/plpar_wrappers.h>
48#include <asm/pSeries_reconfig.h>
49
50#include "mpic.h"
51#include "bpa_iic.h"
52
53#ifdef DEBUG
54#define DBG(fmt...) udbg_printf(fmt)
55#else
56#define DBG(fmt...)
57#endif
58
59/*
60 * The primary thread of each non-boot processor is recorded here before
61 * smp init.
62 */
63static cpumask_t of_spin_map;
64
65extern void pSeries_secondary_smp_init(unsigned long);
66
67#ifdef CONFIG_HOTPLUG_CPU
68
69/* Get state of physical CPU.
70 * Return codes:
71 * 0 - The processor is in the RTAS stopped state
72 * 1 - stop-self is in progress
73 * 2 - The processor is not in the RTAS stopped state
74 * -1 - Hardware Error
 75 * -2 - Hardware busy, try again later.
76 */
77static int query_cpu_stopped(unsigned int pcpu)
78{
79 int cpu_status;
80 int status, qcss_tok;
81
82 qcss_tok = rtas_token("query-cpu-stopped-state");
83 if (qcss_tok == RTAS_UNKNOWN_SERVICE)
84 return -1;
85 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
86 if (status != 0) {
87 printk(KERN_ERR
88 "RTAS query-cpu-stopped-state failed: %i\n", status);
89 return status;
90 }
91
92 return cpu_status;
93}
94
95int pSeries_cpu_disable(void)
96{
97 int cpu = smp_processor_id();
98
99 cpu_clear(cpu, cpu_online_map);
100 systemcfg->processorCount--;
101
102 /*fix boot_cpuid here*/
103 if (cpu == boot_cpuid)
104 boot_cpuid = any_online_cpu(cpu_online_map);
105
106 /* FIXME: abstract this to not be platform specific later on */
107 xics_migrate_irqs_away();
108 return 0;
109}
110
111void pSeries_cpu_die(unsigned int cpu)
112{
113 int tries;
114 int cpu_status;
115 unsigned int pcpu = get_hard_smp_processor_id(cpu);
116
117 for (tries = 0; tries < 25; tries++) {
118 cpu_status = query_cpu_stopped(pcpu);
119 if (cpu_status == 0 || cpu_status == -1)
120 break;
121 msleep(200);
122 }
123 if (cpu_status != 0) {
124 printk("Querying DEAD? cpu %i (%i) shows %i\n",
125 cpu, pcpu, cpu_status);
126 }
127
 128 /* Isolation and deallocation are definitely done by
 129 * drslot_chrp_cpu. If they were not, they would be
130 * done here. Change isolate state to Isolate and
131 * change allocation-state to Unusable.
132 */
133 paca[cpu].cpu_start = 0;
134}
135
136/*
137 * Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
138 * here is that a cpu device node may represent up to two logical cpus
139 * in the SMT case. We must honor the assumption in other code that
140 * the logical ids for sibling SMT threads x and y are adjacent, such
141 * that x^1 == y and y^1 == x.
142 */
143static int pSeries_add_processor(struct device_node *np)
144{
145 unsigned int cpu;
146 cpumask_t candidate_map, tmp = CPU_MASK_NONE;
147 int err = -ENOSPC, len, nthreads, i;
148 u32 *intserv;
149
150 intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len);
151 if (!intserv)
152 return 0;
153
154 nthreads = len / sizeof(u32);
155 for (i = 0; i < nthreads; i++)
156 cpu_set(i, tmp);
157
158 lock_cpu_hotplug();
159
160 BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
161
162 /* Get a bitmap of unoccupied slots. */
163 cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
164 if (cpus_empty(candidate_map)) {
165 /* If we get here, it most likely means that NR_CPUS is
166 * less than the partition's max processors setting.
167 */
168 printk(KERN_ERR "Cannot add cpu %s; this system configuration"
169 " supports %d logical cpus.\n", np->full_name,
170 cpus_weight(cpu_possible_map));
171 goto out_unlock;
172 }
173
174 while (!cpus_empty(tmp))
175 if (cpus_subset(tmp, candidate_map))
176 /* Found a range where we can insert the new cpu(s) */
177 break;
178 else
179 cpus_shift_left(tmp, tmp, nthreads);
180
181 if (cpus_empty(tmp)) {
182 printk(KERN_ERR "Unable to find space in cpu_present_map for"
183 " processor %s with %d thread(s)\n", np->name,
184 nthreads);
185 goto out_unlock;
186 }
187
188 for_each_cpu_mask(cpu, tmp) {
189 BUG_ON(cpu_isset(cpu, cpu_present_map));
190 cpu_set(cpu, cpu_present_map);
191 set_hard_smp_processor_id(cpu, *intserv++);
192 }
193 err = 0;
194out_unlock:
195 unlock_cpu_hotplug();
196 return err;
197}
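
[Editor's note] Because sibling SMT threads must occupy adjacent logical ids (x and x^1), the slot search above starts with a mask of the nthreads lowest bits and shifts it left by nthreads until the whole mask lands on unoccupied slots. The same search expressed over plain 64-bit masks (a sketch assuming at most 64 logical cpus):

#include <stdint.h>
#include <stdio.h>

/* Find the lowest nthreads-aligned run of free slots, mirroring the
 * cpus_shift_left() search in pSeries_add_processor(). Returns the
 * first cpu id of the run, or -1 if nothing fits. */
static int find_thread_slots(uint64_t present, uint64_t possible, int nthreads)
{
        uint64_t candidate = possible & ~present;    /* unoccupied slots */
        uint64_t tmp = (1ULL << nthreads) - 1;       /* assumes nthreads < 64 */
        int base = 0;

        while (tmp && (tmp & ~candidate)) {          /* a needed slot is taken */
                tmp <<= nthreads;                    /* try the next range */
                base += nthreads;
        }
        return tmp ? base : -1;
}

int main(void)
{
        /* 8 possible cpus, cpus 0-3 present, adding one 2-thread core. */
        printf("new core starts at cpu %d\n", find_thread_slots(0x0f, 0xff, 2));
        return 0;
}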
198
199/*
200 * Update the present map for a cpu node which is going away, and set
201 * the hard id in the paca(s) to -1 to be consistent with boot time
202 * convention for non-present cpus.
203 */
204static void pSeries_remove_processor(struct device_node *np)
205{
206 unsigned int cpu;
207 int len, nthreads, i;
208 u32 *intserv;
209
210 intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len);
211 if (!intserv)
212 return;
213
214 nthreads = len / sizeof(u32);
215
216 lock_cpu_hotplug();
217 for (i = 0; i < nthreads; i++) {
218 for_each_present_cpu(cpu) {
219 if (get_hard_smp_processor_id(cpu) != intserv[i])
220 continue;
221 BUG_ON(cpu_online(cpu));
222 cpu_clear(cpu, cpu_present_map);
223 set_hard_smp_processor_id(cpu, -1);
224 break;
225 }
226 if (cpu == NR_CPUS)
227 printk(KERN_WARNING "Could not find cpu to remove "
228 "with physical id 0x%x\n", intserv[i]);
229 }
230 unlock_cpu_hotplug();
231}
232
233static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
234{
235 int err = NOTIFY_OK;
236
237 switch (action) {
238 case PSERIES_RECONFIG_ADD:
239 if (pSeries_add_processor(node))
240 err = NOTIFY_BAD;
241 break;
242 case PSERIES_RECONFIG_REMOVE:
243 pSeries_remove_processor(node);
244 break;
245 default:
246 err = NOTIFY_DONE;
247 break;
248 }
249 return err;
250}
251
252static struct notifier_block pSeries_smp_nb = {
253 .notifier_call = pSeries_smp_notifier,
254};
255
256#endif /* CONFIG_HOTPLUG_CPU */
257
258/**
259 * smp_startup_cpu() - start the given cpu
260 *
261 * At boot time, there is nothing to do for primary threads which were
262 * started from Open Firmware. For anything else, call RTAS with the
263 * appropriate start location.
264 *
265 * Returns:
266 * 0 - failure
267 * 1 - success
268 */
269static inline int __devinit smp_startup_cpu(unsigned int lcpu)
270{
271 int status;
272 unsigned long start_here = __pa((u32)*((unsigned long *)
273 pSeries_secondary_smp_init));
274 unsigned int pcpu;
275 int start_cpu;
276
277 if (cpu_isset(lcpu, of_spin_map))
278 /* Already started by OF and sitting in spin loop */
279 return 1;
280
281 pcpu = get_hard_smp_processor_id(lcpu);
282
283 /* Fixup atomic count: it exited inside IRQ handler. */
284 paca[lcpu].__current->thread_info->preempt_count = 0;
285
286 /*
287 * If the RTAS start-cpu token does not exist then presume the
288 * cpu is already spinning.
289 */
290 start_cpu = rtas_token("start-cpu");
291 if (start_cpu == RTAS_UNKNOWN_SERVICE)
292 return 1;
293
294 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
295 if (status != 0) {
296 printk(KERN_ERR "start-cpu failed: %i\n", status);
297 return 0;
298 }
299
300 return 1;
301}
302
303#ifdef CONFIG_XICS
304static inline void smp_xics_do_message(int cpu, int msg)
305{
306 set_bit(msg, &xics_ipi_message[cpu].value);
307 mb();
308 xics_cause_IPI(cpu);
309}
310
311static void smp_xics_message_pass(int target, int msg)
312{
313 unsigned int i;
314
315 if (target < NR_CPUS) {
316 smp_xics_do_message(target, msg);
317 } else {
318 for_each_online_cpu(i) {
319 if (target == MSG_ALL_BUT_SELF
320 && i == smp_processor_id())
321 continue;
322 smp_xics_do_message(i, msg);
323 }
324 }
325}
326
327static int __init smp_xics_probe(void)
328{
329 xics_request_IPIs();
330
331 return cpus_weight(cpu_possible_map);
332}
333
334static void __devinit smp_xics_setup_cpu(int cpu)
335{
336 if (cpu != boot_cpuid)
337 xics_setup_cpu();
338
339 if (firmware_has_feature(FW_FEATURE_SPLPAR))
340 vpa_init(cpu);
341
342 cpu_clear(cpu, of_spin_map);
343
344}
345#endif /* CONFIG_XICS */
346#ifdef CONFIG_BPA_IIC
347static void smp_iic_message_pass(int target, int msg)
348{
349 unsigned int i;
350
351 if (target < NR_CPUS) {
352 iic_cause_IPI(target, msg);
353 } else {
354 for_each_online_cpu(i) {
355 if (target == MSG_ALL_BUT_SELF
356 && i == smp_processor_id())
357 continue;
358 iic_cause_IPI(i, msg);
359 }
360 }
361}
362
363static int __init smp_iic_probe(void)
364{
365 iic_request_IPIs();
366
367 return cpus_weight(cpu_possible_map);
368}
369
370static void __devinit smp_iic_setup_cpu(int cpu)
371{
372 if (cpu != boot_cpuid)
373 iic_setup_cpu();
374}
375#endif /* CONFIG_BPA_IIC */
376
377static DEFINE_SPINLOCK(timebase_lock);
378static unsigned long timebase = 0;
379
380static void __devinit pSeries_give_timebase(void)
381{
382 spin_lock(&timebase_lock);
383 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
384 timebase = get_tb();
385 spin_unlock(&timebase_lock);
386
387 while (timebase)
388 barrier();
389 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
390}
391
392static void __devinit pSeries_take_timebase(void)
393{
394 while (!timebase)
395 barrier();
396 spin_lock(&timebase_lock);
397 set_tb(timebase >> 32, timebase & 0xffffffff);
398 timebase = 0;
399 spin_unlock(&timebase_lock);
400}
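
[Editor's note] pSeries_give_timebase() and pSeries_take_timebase() form a one-slot rendezvous: the giver freezes the timebase, publishes its value, and keeps it frozen until the taker zeroes the slot to signal consumption, so the value cannot drift while it is in flight. A user-space sketch of the same handshake, with C11 atomics standing in for the barrier()-based spins and comments in place of the RTAS freeze/thaw calls (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t timebase;       /* 0 means "slot is empty" */

static void *giver(void *arg)
{
        (void)arg;
        /* rtas "freeze-time-base" would be called here */
        atomic_store(&timebase, 0x123456789aULL);   /* hypothetical TB value */
        while (atomic_load(&timebase) != 0)
                ;                       /* barrier() spin in the kernel */
        /* rtas "thaw-time-base" would be called here */
        return NULL;
}

static void *taker(void *arg)
{
        uint64_t tb;

        (void)arg;
        while ((tb = atomic_load(&timebase)) == 0)
                ;                       /* wait for the value to appear */
        printf("set_tb(%llx)\n", (unsigned long long)tb);
        atomic_store(&timebase, 0);     /* signal consumption */
        return NULL;
}

int main(void)
{
        pthread_t g, t;

        pthread_create(&t, NULL, taker, NULL);
        pthread_create(&g, NULL, giver, NULL);
        pthread_join(g, NULL);
        pthread_join(t, NULL);
        return 0;
}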
401
402static void __devinit smp_pSeries_kick_cpu(int nr)
403{
404 BUG_ON(nr < 0 || nr >= NR_CPUS);
405
406 if (!smp_startup_cpu(nr))
407 return;
408
409 /*
410 * The processor is currently spinning, waiting for the
 411 * cpu_start field to become non-zero. After we set cpu_start,
 412 * the processor will continue on to secondary_start.
413 */
414 paca[nr].cpu_start = 1;
415}
416
417static int smp_pSeries_cpu_bootable(unsigned int nr)
418{
419 /* Special case - we inhibit secondary thread startup
420 * during boot if the user requests it. Odd-numbered
421 * cpus are assumed to be secondary threads.
422 */
423 if (system_state < SYSTEM_RUNNING &&
424 cpu_has_feature(CPU_FTR_SMT) &&
425 !smt_enabled_at_boot && nr % 2 != 0)
426 return 0;
427
428 return 1;
429}
430#ifdef CONFIG_MPIC
431static struct smp_ops_t pSeries_mpic_smp_ops = {
432 .message_pass = smp_mpic_message_pass,
433 .probe = smp_mpic_probe,
434 .kick_cpu = smp_pSeries_kick_cpu,
435 .setup_cpu = smp_mpic_setup_cpu,
436};
437#endif
438#ifdef CONFIG_XICS
439static struct smp_ops_t pSeries_xics_smp_ops = {
440 .message_pass = smp_xics_message_pass,
441 .probe = smp_xics_probe,
442 .kick_cpu = smp_pSeries_kick_cpu,
443 .setup_cpu = smp_xics_setup_cpu,
444 .cpu_bootable = smp_pSeries_cpu_bootable,
445};
446#endif
447#ifdef CONFIG_BPA_IIC
448static struct smp_ops_t bpa_iic_smp_ops = {
449 .message_pass = smp_iic_message_pass,
450 .probe = smp_iic_probe,
451 .kick_cpu = smp_pSeries_kick_cpu,
452 .setup_cpu = smp_iic_setup_cpu,
453 .cpu_bootable = smp_pSeries_cpu_bootable,
454};
455#endif
456
457/* This is called very early */
458void __init smp_init_pSeries(void)
459{
460 int i;
461
462 DBG(" -> smp_init_pSeries()\n");
463
464 switch (ppc64_interrupt_controller) {
465#ifdef CONFIG_MPIC
466 case IC_OPEN_PIC:
467 smp_ops = &pSeries_mpic_smp_ops;
468 break;
469#endif
470#ifdef CONFIG_XICS
471 case IC_PPC_XIC:
472 smp_ops = &pSeries_xics_smp_ops;
473 break;
474#endif
475#ifdef CONFIG_BPA_IIC
476 case IC_BPA_IIC:
477 smp_ops = &bpa_iic_smp_ops;
478 break;
479#endif
480 default:
481 panic("Invalid interrupt controller");
482 }
483
484#ifdef CONFIG_HOTPLUG_CPU
485 smp_ops->cpu_disable = pSeries_cpu_disable;
486 smp_ops->cpu_die = pSeries_cpu_die;
487
488 /* Processors can be added/removed only on LPAR */
489 if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
490 pSeries_reconfig_notifier_register(&pSeries_smp_nb);
491#endif
492
493 /* Mark threads which are still spinning in hold loops. */
494 if (cpu_has_feature(CPU_FTR_SMT)) {
495 for_each_present_cpu(i) {
496 if (i % 2 == 0)
497 /*
498 * Even-numbered logical cpus correspond to
499 * primary threads.
500 */
501 cpu_set(i, of_spin_map);
502 }
503 } else {
504 of_spin_map = cpu_present_map;
505 }
506
507 cpu_clear(boot_cpuid, of_spin_map);
508
509 /* Non-lpar has additional take/give timebase */
510 if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
511 smp_ops->give_timebase = pSeries_give_timebase;
512 smp_ops->take_timebase = pSeries_take_timebase;
513 }
514
515 DBG(" <- smp_init_pSeries()\n");
516}
517
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/ppc64/kernel/pSeries_vio.c
deleted file mode 100644
index e0ae06f58f86..000000000000
--- a/arch/ppc64/kernel/pSeries_vio.c
+++ /dev/null
@@ -1,273 +0,0 @@
1/*
2 * IBM PowerPC pSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/kobject.h>
20#include <asm/iommu.h>
21#include <asm/dma.h>
22#include <asm/prom.h>
23#include <asm/vio.h>
24#include <asm/hvcall.h>
25
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27
28static void probe_bus_pseries(void)
29{
30 struct device_node *node_vroot, *of_node;
31
32 node_vroot = find_devices("vdevice");
33 if ((node_vroot == NULL) || (node_vroot->child == NULL))
34 /* this machine doesn't do virtual IO, and that's ok */
35 return;
36
37 /*
38 * Create struct vio_devices for each virtual device in the device tree.
39 * Drivers will associate with them later.
40 */
41 for (of_node = node_vroot->child; of_node != NULL;
42 of_node = of_node->sibling) {
43 printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
44 vio_register_device_node(of_node);
45 }
46}
47
48/**
49 * vio_match_device_pseries: - Tell if a pSeries VIO device matches a
50 * vio_device_id
51 */
52static int vio_match_device_pseries(const struct vio_device_id *id,
53 const struct vio_dev *dev)
54{
55 return (strncmp(dev->type, id->type, strlen(id->type)) == 0) &&
56 device_is_compatible(dev->dev.platform_data, id->compat);
57}
58
59static void vio_release_device_pseries(struct device *dev)
60{
61 /* XXX free TCE table */
62 of_node_put(dev->platform_data);
63}
64
65static ssize_t viodev_show_devspec(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 struct device_node *of_node = dev->platform_data;
69
70 return sprintf(buf, "%s\n", of_node->full_name);
71}
72DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
73
74static void vio_unregister_device_pseries(struct vio_dev *viodev)
75{
76 device_remove_file(&viodev->dev, &dev_attr_devspec);
77}
78
79static struct vio_bus_ops vio_bus_ops_pseries = {
80 .match = vio_match_device_pseries,
81 .unregister_device = vio_unregister_device_pseries,
82 .release_device = vio_release_device_pseries,
83};
84
85/**
86 * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus
87 */
88static int __init vio_bus_init_pseries(void)
89{
90 int err;
91
92 err = vio_bus_init(&vio_bus_ops_pseries);
93 if (err == 0)
94 probe_bus_pseries();
95 return err;
96}
97
98__initcall(vio_bus_init_pseries);
99
100/**
101 * vio_build_iommu_table: - gets the dma information from OF and
102 * builds the TCE tree.
103 * @dev: the virtual device.
104 *
105 * Returns a pointer to the built tce tree, or NULL if it can't
 106 * find the property.
107*/
108static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
109{
110 unsigned int *dma_window;
111 struct iommu_table *newTceTable;
112 unsigned long offset;
113 int dma_window_property_size;
114
115 dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
116 if(!dma_window) {
117 return NULL;
118 }
119
120 newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
121
122 /* There should be some code to extract the phys-encoded offset
123 using prom_n_addr_cells(). However, according to a comment
124 on earlier versions, it's always zero, so we don't bother */
125 offset = dma_window[1] >> PAGE_SHIFT;
126
127 /* TCE table size - measured in tce entries */
128 newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
129 /* offset for VIO should always be 0 */
130 newTceTable->it_offset = offset;
131 newTceTable->it_busno = 0;
132 newTceTable->it_index = (unsigned long)dma_window[0];
133 newTceTable->it_type = TCE_VB;
134
135 return iommu_init_table(newTceTable);
136}
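
[Editor's note] vio_build_iommu_table() reads the window geometry straight out of the 5-cell "ibm,my-dma-window" property: cell 0 is the LIOBN (it_index), cell 1 the DMA offset, and cell 4 the window size in bytes, with byte counts converted to page/TCE-entry counts by shifting right by PAGE_SHIFT. A sketch of that decode (the cell layout is taken from this function, an illustration rather than a binding spec; the sample values are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4K pages */

struct tce_window {
        uint32_t liobn;                 /* becomes it_index */
        uint64_t offset;                /* becomes it_offset, in pages */
        uint64_t entries;               /* becomes it_size, in TCE entries */
};

/* Decode the five cells the way vio_build_iommu_table() reads them:
 * cell 0 = LIOBN, cell 1 = DMA offset, cell 4 = window size in bytes. */
static struct tce_window decode_dma_window(const uint32_t cells[5])
{
        struct tce_window w;

        w.liobn   = cells[0];
        w.offset  = cells[1] >> PAGE_SHIFT;
        w.entries = (uint64_t)cells[4] >> PAGE_SHIFT;
        return w;
}

int main(void)
{
        uint32_t cells[5] = { 0x10000000, 0, 0, 0, 0x10000000 }; /* made up */
        struct tce_window w = decode_dma_window(cells);

        printf("liobn=%x offset=%llu entries=%llu\n", w.liobn,
               (unsigned long long)w.offset, (unsigned long long)w.entries);
        return 0;
}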
137
138/**
139 * vio_register_device_node: - Register a new vio device.
140 * @of_node: The OF node for this device.
141 *
142 * Creates and initializes a vio_dev structure from the data in
143 * of_node (dev.platform_data) and adds it to the list of virtual devices.
144 * Returns a pointer to the created vio_dev or NULL if node has
145 * NULL device_type or compatible fields.
146 */
147struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
148{
149 struct vio_dev *viodev;
150 unsigned int *unit_address;
151 unsigned int *irq_p;
152
153 /* we need the 'device_type' property, in order to match with drivers */
154 if ((NULL == of_node->type)) {
155 printk(KERN_WARNING
156 "%s: node %s missing 'device_type'\n", __FUNCTION__,
157 of_node->name ? of_node->name : "<unknown>");
158 return NULL;
159 }
160
161 unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
162 if (!unit_address) {
163 printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
164 of_node->name ? of_node->name : "<unknown>");
165 return NULL;
166 }
167
168 /* allocate a vio_dev for this node */
169 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
170 if (!viodev) {
171 return NULL;
172 }
173 memset(viodev, 0, sizeof(struct vio_dev));
174
175 viodev->dev.platform_data = of_node_get(of_node);
176
177 viodev->irq = NO_IRQ;
178 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
179 if (irq_p) {
180 int virq = virt_irq_create_mapping(*irq_p);
181 if (virq == NO_IRQ) {
182 printk(KERN_ERR "Unable to allocate interrupt "
183 "number for %s\n", of_node->full_name);
184 } else
185 viodev->irq = irq_offset_up(virq);
186 }
187
188 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
189 viodev->name = of_node->name;
190 viodev->type = of_node->type;
191 viodev->unit_address = *unit_address;
192 viodev->iommu_table = vio_build_iommu_table(viodev);
193
194 /* register with generic device framework */
195 if (vio_register_device(viodev) == NULL) {
196 /* XXX free TCE table */
197 kfree(viodev);
198 return NULL;
199 }
200 device_create_file(&viodev->dev, &dev_attr_devspec);
201
202 return viodev;
203}
204EXPORT_SYMBOL(vio_register_device_node);
205
206/**
207 * vio_get_attribute: - get attribute for virtual device
208 * @vdev: The vio device to get property.
209 * @which: The property/attribute to be extracted.
210 * @length: Pointer to length of returned data size (unused if NULL).
211 *
212 * Calls prom.c's get_property() to return the value of the
213 * attribute specified by the preprocessor constant @which
214*/
215const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
216{
217 return get_property(vdev->dev.platform_data, (char*)which, length);
218}
219EXPORT_SYMBOL(vio_get_attribute);
220
221/* vio_find_name() - internal because only vio.c knows how we formatted the
222 * kobject name
223 * XXX once vio_bus_type.devices is actually used as a kset in
224 * drivers/base/bus.c, this function should be removed in favor of
225 * "device_find(kobj_name, &vio_bus_type)"
226 */
227static struct vio_dev *vio_find_name(const char *kobj_name)
228{
229 struct kobject *found;
230
231 found = kset_find_obj(&devices_subsys.kset, kobj_name);
232 if (!found)
233 return NULL;
234
235 return to_vio_dev(container_of(found, struct device, kobj));
236}
237
238/**
239 * vio_find_node - find an already-registered vio_dev
240 * @vnode: device_node of the virtual device we're looking for
241 */
242struct vio_dev *vio_find_node(struct device_node *vnode)
243{
244 uint32_t *unit_address;
245 char kobj_name[BUS_ID_SIZE];
246
247 /* construct the kobject name from the device node */
248 unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
249 if (!unit_address)
250 return NULL;
251 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
252
253 return vio_find_name(kobj_name);
254}
255EXPORT_SYMBOL(vio_find_node);
256
257int vio_enable_interrupts(struct vio_dev *dev)
258{
259 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
260 if (rc != H_Success)
261 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
262 return rc;
263}
264EXPORT_SYMBOL(vio_enable_interrupts);
265
266int vio_disable_interrupts(struct vio_dev *dev)
267{
268 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
269 if (rc != H_Success)
270 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
271 return rc;
272}
273EXPORT_SYMBOL(vio_disable_interrupts);
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1da69d5..3d2106b022a1 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34#include <asm/ppc-pci.h>
35#include "pci.h"
36 35
37#ifdef DEBUG 36#ifdef DEBUG
38#define DBG(fmt...) udbg_printf(fmt) 37#define DBG(fmt...) udbg_printf(fmt)
@@ -727,16 +726,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
727 * above routine 726 * above routine
728 */ 727 */
729pgprot_t pci_phys_mem_access_prot(struct file *file, 728pgprot_t pci_phys_mem_access_prot(struct file *file,
730 unsigned long offset, 729 unsigned long pfn,
731 unsigned long size, 730 unsigned long size,
732 pgprot_t protection) 731 pgprot_t protection)
733{ 732{
734 struct pci_dev *pdev = NULL; 733 struct pci_dev *pdev = NULL;
735 struct resource *found = NULL; 734 struct resource *found = NULL;
736 unsigned long prot = pgprot_val(protection); 735 unsigned long prot = pgprot_val(protection);
736 unsigned long offset = pfn << PAGE_SHIFT;
737 int i; 737 int i;
738 738
739 if (page_is_ram(offset >> PAGE_SHIFT)) 739 if (page_is_ram(pfn))
740 return __pgprot(prot); 740 return __pgprot(prot);
741 741
742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
@@ -881,9 +881,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
881} 881}
882 882
883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
884 struct device_node *dev) 884 struct device_node *dev, int prim)
885{ 885{
886 unsigned int *ranges; 886 unsigned int *ranges, pci_space;
887 unsigned long size; 887 unsigned long size;
888 int rlen = 0; 888 int rlen = 0;
889 int memno = 0; 889 int memno = 0;
@@ -906,16 +906,39 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen); 906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
907 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 907 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
908 res = NULL; 908 res = NULL;
909 pci_addr = (unsigned long)ranges[1] << 32 | ranges[2]; 909 pci_space = ranges[0];
910 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
910 911
911 cpu_phys_addr = ranges[3]; 912 cpu_phys_addr = ranges[3];
912 if (na == 2) 913 if (na >= 2)
913 cpu_phys_addr = cpu_phys_addr << 32 | ranges[4]; 914 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
914 915
915 size = (unsigned long)ranges[na+3] << 32 | ranges[na+4]; 916 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
917 ranges += np;
916 if (size == 0) 918 if (size == 0)
917 continue; 919 continue;
918 switch ((ranges[0] >> 24) & 0x3) { 920
921 /* Now consume following elements while they are contiguous */
922 while (rlen >= np * sizeof(unsigned int)) {
923 unsigned long addr, phys;
924
925 if (ranges[0] != pci_space)
926 break;
927 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
928 phys = ranges[3];
929 if (na >= 2)
930 phys = (phys << 32) | ranges[4];
931 if (addr != pci_addr + size ||
932 phys != cpu_phys_addr + size)
933 break;
934
935 size += ((unsigned long)ranges[na+3] << 32)
936 | ranges[na+4];
937 ranges += np;
938 rlen -= np * sizeof(unsigned int);
939 }
940
941 switch ((pci_space >> 24) & 0x3) {
919 case 1: /* I/O space */ 942 case 1: /* I/O space */
920 hose->io_base_phys = cpu_phys_addr; 943 hose->io_base_phys = cpu_phys_addr;
921 hose->pci_io_size = size; 944 hose->pci_io_size = size;
@@ -949,7 +972,6 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
949 res->sibling = NULL; 972 res->sibling = NULL;
950 res->child = NULL; 973 res->child = NULL;
951 } 974 }
952 ranges += np;
953 } 975 }
954} 976}
955 977
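
[Editor's note: the new inner while loop above coalesces consecutive "ranges" entries that share a PCI space code and are contiguous in both PCI and CPU address space, so a window the firmware reports in pieces still becomes a single resource. Below is a self-contained user-space sketch of the same merging logic; the struct layout is a simplification (the real code walks raw OF cells using the na/np strides):]

	#include <stdio.h>

	struct of_range {
		unsigned space;			/* PCI space code from ranges[0] */
		unsigned long pci_addr, phys, size;
	};

	/* Merge adjacent entries that are contiguous in both address spaces,
	 * mirroring the new inner loop in pci_process_bridge_OF_ranges().
	 * Returns the number of coalesced entries left in r[].
	 */
	static int merge_ranges(struct of_range *r, int n)
	{
		int i, out = 0;

		for (i = 0; i < n; i++) {
			struct of_range cur = r[i];

			while (i + 1 < n && r[i + 1].space == cur.space &&
			       r[i + 1].pci_addr == cur.pci_addr + cur.size &&
			       r[i + 1].phys == cur.phys + cur.size) {
				cur.size += r[i + 1].size;	/* absorb the neighbour */
				i++;
			}
			r[out++] = cur;
		}
		return out;
	}

	int main(void)
	{
		struct of_range r[] = {
			{ 2, 0x80000000UL, 0xc0000000UL, 0x10000000UL },
			{ 2, 0x90000000UL, 0xd0000000UL, 0x10000000UL },	/* contiguous */
			{ 1, 0x00000000UL, 0xf2000000UL, 0x00100000UL },	/* I/O window */
		};
		int i, n = merge_ranges(r, 3);

		for (i = 0; i < n; i++)
			printf("space %u: pci %#lx -> phys %#lx, size %#lx\n",
			       r[i].space, r[i].pci_addr, r[i].phys, r[i].size);
		return 0;
	}
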
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
deleted file mode 100644
index 5eb2cc320566..000000000000
--- a/arch/ppc64/kernel/pci.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __PPC_KERNEL_PCI_H__
10#define __PPC_KERNEL_PCI_H__
11
12#include <linux/pci.h>
13#include <asm/pci-bridge.h>
14
15extern unsigned long isa_io_base;
16
17extern void pci_setup_pci_controller(struct pci_controller *hose);
18extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
19extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
20
21
22extern struct list_head hose_list;
23extern int global_phb_number;
24
25extern unsigned long find_and_init_phbs(void);
26
27extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */
28
29/* PCI device_node operations */
30struct device_node;
31typedef void *(*traverse_func)(struct device_node *me, void *data);
32void *traverse_pci_devices(struct device_node *start, traverse_func pre,
33 void *data);
34
35void pci_devs_phb_init(void);
36void pci_devs_phb_init_dynamic(struct pci_controller *phb);
37
38/* PCI address cache management routines */
39void pci_addr_cache_insert_device(struct pci_dev *dev);
40void pci_addr_cache_remove_device(struct pci_dev *dev);
41
42/* From rtas_pci.h */
43void init_pci_config_tokens (void);
44unsigned long get_phb_buid (struct device_node *);
45
46/* From pSeries_pci.h */
47extern void pSeries_final_fixup(void);
48extern void pSeries_irq_bus_setup(struct pci_bus *bus);
49
50extern unsigned long pci_probe_only;
51extern unsigned long pci_assign_all_buses;
52extern int pci_read_irq_line(struct pci_dev *pci_dev);
53
54#endif /* __PPC_KERNEL_PCI_H__ */
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index 54055c81017a..e1a32f802c0b 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h> 29#include <asm/abs_addr.h>
30 30#include <asm/ppc-pci.h>
31#include "pci.h"
32 31
33static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
34 dma_addr_t *dma_handle, gfp_t flag) 33 dma_addr_t *dma_handle, gfp_t flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d07d57..493bbe43f5b4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
30#include <asm/prom.h> 30#include <asm/prom.h>
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h> 32#include <asm/pSeries_reconfig.h>
33 33#include <asm/ppc-pci.h>
34#include "pci.h"
35 34
36/* 35/*
37 * Traverse_func that inits the PCI fields of the device node. 36 * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index d9e33b7d4203..bdf15dbbf4f0 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/ppc64/kernel/pci_iommu.c 2 * arch/ppc64/kernel/pci_iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * 4 *
5 * Rewrite, cleanup, new allocation schemes: 5 * Rewrite, cleanup, new allocation schemes:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation 6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * 7 *
8 * Dynamic DMA mapping support, platform-independent parts. 8 * Dynamic DMA mapping support, platform-independent parts.
@@ -11,19 +11,18 @@
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 25
26#include <linux/config.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
@@ -37,11 +36,7 @@
37#include <asm/iommu.h> 36#include <asm/iommu.h>
38#include <asm/pci-bridge.h> 37#include <asm/pci-bridge.h>
39#include <asm/machdep.h> 38#include <asm/machdep.h>
40#include "pci.h" 39#include <asm/ppc-pci.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#include <asm/iSeries/iSeries_pci.h>
44#endif /* CONFIG_PPC_ISERIES */
45 40
46/* 41/*
47 * We can use ->sysdata directly and avoid the extra work in 42 * We can use ->sysdata directly and avoid the extra work in
@@ -61,13 +56,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
61 } else 56 } else
62 pdev = to_pci_dev(dev); 57 pdev = to_pci_dev(dev);
63 58
64#ifdef CONFIG_PPC_ISERIES
65 return ISERIES_DEVNODE(pdev)->iommu_table;
66#endif /* CONFIG_PPC_ISERIES */
67
68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 59 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 60}
72 61
73 62
diff --git a/arch/ppc64/kernel/pmac.h b/arch/ppc64/kernel/pmac.h
deleted file mode 100644
index 40e1c5030f74..000000000000
--- a/arch/ppc64/kernel/pmac.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6
7/*
8 * Declaration for the various functions exported by the
9 * pmac_* files. Mostly for use by pmac_setup
10 */
11
12extern void pmac_get_boot_time(struct rtc_time *tm);
13extern void pmac_get_rtc_time(struct rtc_time *tm);
14extern int pmac_set_rtc_time(struct rtc_time *tm);
15extern void pmac_read_rtc_time(void);
16extern void pmac_calibrate_decr(void);
17
18extern void pmac_pcibios_fixup(void);
19extern void pmac_pci_init(void);
20extern void pmac_setup_pci_dma(void);
21extern void pmac_check_ht_link(void);
22
23extern void pmac_setup_smp(void);
24
25extern unsigned long pmac_ide_get_base(int index);
26extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
27 unsigned long data_port, unsigned long ctrl_port, int *irq);
28
29extern void pmac_nvram_init(void);
30
31#endif /* __PMAC_H__ */
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
deleted file mode 100644
index eb4e6c3f694d..000000000000
--- a/arch/ppc64/kernel/pmac_feature.c
+++ /dev/null
@@ -1,767 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/keylargo.h>
35#include <asm/uninorth.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/dbdma.h>
41#include <asm/pci-bridge.h>
42#include <asm/pmac_low_i2c.h>
43
44#undef DEBUG_FEATURE
45
46#ifdef DEBUG_FEATURE
47#define DBG(fmt...) printk(KERN_DEBUG fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * We use a single global lock to protect accesses. Each driver has
54 * to take care of its own locking
55 */
56static DEFINE_SPINLOCK(feature_lock __pmacdata);
57
58#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
59#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
60
61
62/*
63 * Instance of some macio stuffs
64 */
65struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata;
66
67struct macio_chip* __pmac macio_find(struct device_node* child, int type)
68{
69 while(child) {
70 int i;
71
72 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
73 if (child == macio_chips[i].of_node &&
74 (!type || macio_chips[i].type == type))
75 return &macio_chips[i];
76 child = child->parent;
77 }
78 return NULL;
79}
80EXPORT_SYMBOL_GPL(macio_find);
81
82static const char* macio_names[] __pmacdata =
83{
84 "Unknown",
85 "Grand Central",
86 "OHare",
87 "OHareII",
88 "Heathrow",
89 "Gatwick",
90 "Paddington",
91 "Keylargo",
92 "Pangea",
93 "Intrepid",
94 "K2"
95};
96
97
98
99/*
100 * Uninorth reg. access. Note that Uni-N regs are big endian
101 */
102
103#define UN_REG(r) (uninorth_base + ((r) >> 2))
104#define UN_IN(r) (in_be32(UN_REG(r)))
105#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
106#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
107#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
108
109static struct device_node* uninorth_node __pmacdata;
110static u32* uninorth_base __pmacdata;
111static u32 uninorth_rev __pmacdata;
112static void *u3_ht;
113
114extern struct device_node *k2_skiplist[2];
115
116/*
117 * For each motherboard family, we have a table of functions pointers
118 * that handle the various features.
119 */
120
121typedef long (*feature_call)(struct device_node* node, long param, long value);
122
123struct feature_table_entry {
124 unsigned int selector;
125 feature_call function;
126};
127
128struct pmac_mb_def
129{
130 const char* model_string;
131 const char* model_name;
132 int model_id;
133 struct feature_table_entry* features;
134 unsigned long board_flags;
135};
136static struct pmac_mb_def pmac_mb __pmacdata;
137
138/*
139 * Here are the chip specific feature functions
140 */
141
142
143static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
144{
145 struct macio_chip* macio = &macio_chips[0];
146
147 return MACIO_IN8(param);
148}
149
150
151static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
152{
153 struct macio_chip* macio = &macio_chips[0];
154
155 MACIO_OUT8(param, (u8)(value & 0xff));
156 return 0;
157}
158
159static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
160{
161 struct macio_chip* macio = &macio_chips[0];
162 unsigned long flags;
163
164 if (node == NULL)
165 return -ENODEV;
166
167 LOCK(flags);
168 if (value) {
169 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
170 mb();
171 k2_skiplist[0] = NULL;
172 } else {
173 k2_skiplist[0] = node;
174 mb();
175 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
176 }
177
178 UNLOCK(flags);
179 mdelay(1);
180
181 return 0;
182}
183
184static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
185{
186 struct macio_chip* macio = &macio_chips[0];
187 unsigned long flags;
188
189 if (node == NULL)
190 return -ENODEV;
191
192 LOCK(flags);
193 if (value) {
194 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
195 mb();
196 k2_skiplist[1] = NULL;
197 } else {
198 k2_skiplist[1] = node;
199 mb();
200 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
201 }
202
203 UNLOCK(flags);
204 mdelay(1);
205
206 return 0;
207}
208
209static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
210{
211 unsigned long flags;
212
213 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
214 return 0;
215
216 LOCK(flags);
217 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
218 UNLOCK(flags);
219
220 return 0;
221}
222
223static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
224{
225 struct macio_chip* macio = &macio_chips[0];
226 struct device_node *phy;
227 int need_reset;
228
229 /*
230 * We must not reset the combo PHYs, only the BCM5221 found in
231 * the iMac G5.
232 */
233 phy = of_get_next_child(node, NULL);
234 if (!phy)
235 return -ENODEV;
236 need_reset = device_is_compatible(phy, "B5221");
237 of_node_put(phy);
238 if (!need_reset)
239 return 0;
240
241 /* PHY reset is GPIO 29, not in device-tree unfortunately */
242 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
243 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
244 /* Thankfully, sungem now always calls this at a time when
245 * we can schedule.
246 */
247 msleep(10);
248 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
249
250 return 0;
251}
252
253static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
254{
255 /* Very crude implementation for now */
256 struct macio_chip* macio = &macio_chips[0];
257 unsigned long flags;
258
259 if (value == 0)
260 return 0; /* don't disable yet */
261
262 LOCK(flags);
263 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
264 KL3_I2S0_CLK18_ENABLE);
265 udelay(10);
266 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
267 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
268 udelay(10);
269 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
270 UNLOCK(flags);
271 udelay(10);
272
273 return 0;
274}
275
276
277#ifdef CONFIG_SMP
278static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
279{
280 unsigned int reset_io = 0;
281 unsigned long flags;
282 struct macio_chip* macio;
283 struct device_node* np;
284
285 macio = &macio_chips[0];
286 if (macio->type != macio_keylargo2)
287 return -ENODEV;
288
289 np = find_path_device("/cpus");
290 if (np == NULL)
291 return -ENODEV;
292 for (np = np->child; np != NULL; np = np->sibling) {
293 u32* num = (u32 *)get_property(np, "reg", NULL);
294 u32* rst = (u32 *)get_property(np, "soft-reset", NULL);
295 if (num == NULL || rst == NULL)
296 continue;
297 if (param == *num) {
298 reset_io = *rst;
299 break;
300 }
301 }
302 if (np == NULL || reset_io == 0)
303 return -ENODEV;
304
305 LOCK(flags);
306 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
307 (void)MACIO_IN8(reset_io);
308 udelay(1);
309 MACIO_OUT8(reset_io, 0);
310 (void)MACIO_IN8(reset_io);
311 UNLOCK(flags);
312
313 return 0;
314}
315#endif /* CONFIG_SMP */
316
317/*
318 * This can be called from pmac_smp so isn't static
319 *
320 * This takes the second CPU off the bus on dual CPU machines
321 * running UP
322 */
323void __pmac g5_phy_disable_cpu1(void)
324{
325 UN_OUT(U3_API_PHY_CONFIG_1, 0);
326}
327
328static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
329{
330 switch(param) {
331 case PMAC_MB_INFO_MODEL:
332 return pmac_mb.model_id;
333 case PMAC_MB_INFO_FLAGS:
334 return pmac_mb.board_flags;
335 case PMAC_MB_INFO_NAME:
336 /* hack hack hack... but should work */
337 *((const char **)value) = pmac_mb.model_name;
338 return 0;
339 }
340 return -EINVAL;
341}
342
343
344/*
345 * Table definitions
346 */
347
348/* Used on any machine
349 */
350static struct feature_table_entry any_features[] __pmacdata = {
351 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
352 { 0, NULL }
353};
354
355/* G5 features
356 */
357static struct feature_table_entry g5_features[] __pmacdata = {
358 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
359 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
360 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
361 { PMAC_FTR_READ_GPIO, g5_read_gpio },
362 { PMAC_FTR_WRITE_GPIO, g5_write_gpio },
363 { PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
364 { PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
365#ifdef CONFIG_SMP
366 { PMAC_FTR_RESET_CPU, g5_reset_cpu },
367#endif /* CONFIG_SMP */
368 { 0, NULL }
369};
370
371static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
372 { "PowerMac7,2", "PowerMac G5",
373 PMAC_TYPE_POWERMAC_G5, g5_features,
374 0,
375 },
376 { "PowerMac7,3", "PowerMac G5",
377 PMAC_TYPE_POWERMAC_G5, g5_features,
378 0,
379 },
380 { "PowerMac8,1", "iMac G5",
381 PMAC_TYPE_IMAC_G5, g5_features,
382 0,
383 },
384 { "PowerMac9,1", "PowerMac G5",
385 PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
386 0,
387 },
388 { "RackMac3,1", "XServe G5",
389 PMAC_TYPE_XSERVE_G5, g5_features,
390 0,
391 },
392};
393
394/*
395 * The toplevel feature_call callback
396 */
397long __pmac pmac_do_feature_call(unsigned int selector, ...)
398{
399 struct device_node* node;
400 long param, value;
401 int i;
402 feature_call func = NULL;
403 va_list args;
404
405 if (pmac_mb.features)
406 for (i=0; pmac_mb.features[i].function; i++)
407 if (pmac_mb.features[i].selector == selector) {
408 func = pmac_mb.features[i].function;
409 break;
410 }
411 if (!func)
412 for (i=0; any_features[i].function; i++)
413 if (any_features[i].selector == selector) {
414 func = any_features[i].function;
415 break;
416 }
417 if (!func)
418 return -ENODEV;
419
420 va_start(args, selector);
421 node = (struct device_node*)va_arg(args, void*);
422 param = va_arg(args, long);
423 value = va_arg(args, long);
424 va_end(args);
425
426 return func(node, param, value);
427}
428
429static int __init probe_motherboard(void)
430{
431 int i;
432 struct macio_chip* macio = &macio_chips[0];
433 const char* model = NULL;
434 struct device_node *dt;
435
436 /* Lookup known motherboard type in device-tree. First try an
437 * exact match on the "model" property, then try a "compatible"
438 * match is none is found.
439 */
440 dt = find_devices("device-tree");
441 if (dt != NULL)
442 model = (const char *) get_property(dt, "model", NULL);
443 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
444 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
445 pmac_mb = pmac_mb_defs[i];
446 goto found;
447 }
448 }
449 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
450 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
451 pmac_mb = pmac_mb_defs[i];
452 goto found;
453 }
454 }
455
456 /* Fallback to selection depending on mac-io chip type */
457 switch(macio->type) {
458 case macio_keylargo2:
459 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
460 pmac_mb.model_name = "Unknown K2-based";
461 pmac_mb.features = g5_features;
462 break;
463 default:
464 return -ENODEV;
465 }
466found:
467 /* Check for "mobile" machine */
468 if (model && (strncmp(model, "PowerBook", 9) == 0
469 || strncmp(model, "iBook", 5) == 0))
470 pmac_mb.board_flags |= PMAC_MB_MOBILE;
471
472
473 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
474 return 0;
475}
476
477/* Initialize the Core99 UniNorth host bridge and memory controller
478 */
479static void __init probe_uninorth(void)
480{
481 uninorth_node = of_find_node_by_name(NULL, "u3");
482 if (uninorth_node && uninorth_node->n_addrs > 0) {
483 /* Small hack until I figure out if parsing in prom.c is correct. I should
484 * get rid of that pre-parsed junk anyway
485 */
486 unsigned long address = uninorth_node->addrs[0].address;
487 uninorth_base = ioremap(address, 0x40000);
488 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
489 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
490 } else
491 uninorth_node = NULL;
492
493 if (!uninorth_node)
494 return;
495
496 printk(KERN_INFO "Found U3 memory controller & host bridge, revision: %d\n",
497 uninorth_rev);
498 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
499
500}
501
502static void __init probe_one_macio(const char* name, const char* compat, int type)
503{
504 struct device_node* node;
505 int i;
506 volatile u32* base;
507 u32* revp;
508
509 node = find_devices(name);
510 if (!node || !node->n_addrs)
511 return;
512 if (compat)
513 do {
514 if (device_is_compatible(node, compat))
515 break;
516 node = node->next;
517 } while (node);
518 if (!node)
519 return;
520 for(i=0; i<MAX_MACIO_CHIPS; i++) {
521 if (!macio_chips[i].of_node)
522 break;
523 if (macio_chips[i].of_node == node)
524 return;
525 }
526 if (i >= MAX_MACIO_CHIPS) {
527 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
528 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
529 return;
530 }
531 base = (volatile u32*)ioremap(node->addrs[0].address, node->addrs[0].size);
532 if (!base) {
533 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
534 return;
535 }
536 if (type == macio_keylargo) {
537 u32* did = (u32 *)get_property(node, "device-id", NULL);
538 if (*did == 0x00000025)
539 type = macio_pangea;
540 if (*did == 0x0000003e)
541 type = macio_intrepid;
542 }
543 macio_chips[i].of_node = node;
544 macio_chips[i].type = type;
545 macio_chips[i].base = base;
546 macio_chips[i].flags = MACIO_FLAG_SCCA_ON | MACIO_FLAG_SCCB_ON;
547 macio_chips[i].name = macio_names[type];
548 revp = (u32 *)get_property(node, "revision-id", NULL);
549 if (revp)
550 macio_chips[i].rev = *revp;
551 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
552 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
553}
554
555static int __init
556probe_macios(void)
557{
558 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
559
560 macio_chips[0].lbus.index = 0;
561 macio_chips[1].lbus.index = 1;
562
563 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
564}
565
566static void __init
567set_initial_features(void)
568{
569 struct device_node *np;
570
571 if (macio_chips[0].type == macio_keylargo2) {
572#ifndef CONFIG_SMP
573 /* On SMP machines running UP, we have the second CPU eating
574 * bus cycles. We need to take it off the bus. This is done
575 * from pmac_smp for SMP kernels running on one CPU
576 */
577 np = of_find_node_by_type(NULL, "cpu");
578 if (np != NULL)
579 np = of_find_node_by_type(np, "cpu");
580 if (np != NULL) {
581 g5_phy_disable_cpu1();
582 of_node_put(np);
583 }
584#endif /* CONFIG_SMP */
585 /* Enable GMAC for now for PCI probing. It will be disabled
586 * later on after PCI probe
587 */
588 np = of_find_node_by_name(NULL, "ethernet");
589 while(np) {
590 if (device_is_compatible(np, "K2-GMAC"))
591 g5_gmac_enable(np, 0, 1);
592 np = of_find_node_by_name(np, "ethernet");
593 }
594
595 /* Enable FW before PCI probe. Will be disabled later on
596 * Note: We should have a better way to check that we are
597 * dealing with uninorth internal cell and not a PCI cell
598 * on the external PCI. The code below works though.
599 */
600 np = of_find_node_by_name(NULL, "firewire");
601 while(np) {
602 if (device_is_compatible(np, "pci106b,5811")) {
603 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
604 g5_fw_enable(np, 0, 1);
605 }
606 np = of_find_node_by_name(np, "firewire");
607 }
608 }
609}
610
611void __init
612pmac_feature_init(void)
613{
614 /* Detect the UniNorth memory controller */
615 probe_uninorth();
616
617 /* Probe mac-io controllers */
618 if (probe_macios()) {
619 printk(KERN_WARNING "No mac-io chip found\n");
620 return;
621 }
622
623 /* Setup low-level i2c stuffs */
624 pmac_init_low_i2c();
625
626 /* Probe machine type */
627 if (probe_motherboard())
628 printk(KERN_WARNING "Unknown PowerMac !\n");
629
630 /* Set some initial features (turn off some chips that will
631 * be later turned on)
632 */
633 set_initial_features();
634}
635
636int __init pmac_feature_late_init(void)
637{
638#if 0
639 struct device_node* np;
640
641 /* Request some resources late */
642 if (uninorth_node)
643 request_OF_resource(uninorth_node, 0, NULL);
644 np = find_devices("hammerhead");
645 if (np)
646 request_OF_resource(np, 0, NULL);
647 np = find_devices("interrupt-controller");
648 if (np)
649 request_OF_resource(np, 0, NULL);
650#endif
651 return 0;
652}
653
654device_initcall(pmac_feature_late_init);
655
656#if 0
657static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
658{
659 int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
660 int bits[8] = { 8,16,0,32,2,4,0,0 };
661 int freq = (frq >> 8) & 0xf;
662
663 if (freqs[freq] == 0)
664 printk("%s: Unknown HT link frequency %x\n", name, freq);
665 else
666 printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
667 name, freqs[freq],
668 bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
669}
670#endif
671
672void __init pmac_check_ht_link(void)
673{
674#if 0 /* Disabled for now */
675 u32 ufreq, freq, ucfg, cfg;
676 struct device_node *pcix_node;
677 struct pci_dn *pdn;
678 u8 px_bus, px_devfn;
679 struct pci_controller *px_hose;
680
681 (void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
682 ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
683 ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
684 dump_HT_speeds("U3 HyperTransport", cfg, freq);
685
686 pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
687 if (pcix_node == NULL) {
688 printk("No PCI-X bridge found\n");
689 return;
690 }
691 pdn = pcix_node->data;
692 px_hose = pdn->phb;
693 px_bus = pdn->busno;
694 px_devfn = pdn->devfn;
695
696 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
697 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
698 dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
699 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
700 early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
701 dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
702#endif
703}
704
705/*
706 * Early video resume hook
707 */
708
709static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
710static void *pmac_early_vresume_data __pmacdata;
711
712void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
713{
714 if (_machine != _MACH_Pmac)
715 return;
716 preempt_disable();
717 pmac_early_vresume_proc = proc;
718 pmac_early_vresume_data = data;
719 preempt_enable();
720}
721EXPORT_SYMBOL(pmac_set_early_video_resume);
722
723
724/*
725 * AGP related suspend/resume code
726 */
727
728static struct pci_dev *pmac_agp_bridge __pmacdata;
729static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
730static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
731
732void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
733 int (*suspend)(struct pci_dev *bridge),
734 int (*resume)(struct pci_dev *bridge))
735{
736 if (suspend || resume) {
737 pmac_agp_bridge = bridge;
738 pmac_agp_suspend = suspend;
739 pmac_agp_resume = resume;
740 return;
741 }
742 if (bridge != pmac_agp_bridge)
743 return;
744 pmac_agp_suspend = pmac_agp_resume = NULL;
745 return;
746}
747EXPORT_SYMBOL(pmac_register_agp_pm);
748
749void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
750{
751 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
752 return;
753 if (pmac_agp_bridge->bus != dev->bus)
754 return;
755 pmac_agp_suspend(pmac_agp_bridge);
756}
757EXPORT_SYMBOL(pmac_suspend_agp_for_card);
758
759void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
760{
761 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
762 return;
763 if (pmac_agp_bridge->bus != dev->bus)
764 return;
765 pmac_agp_resume(pmac_agp_bridge);
766}
767EXPORT_SYMBOL(pmac_resume_agp_for_card);
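
[Editor's note: the core of the deleted file is pmac_do_feature_call(), which looks a selector up first in the per-board feature table, then in the generic any_features table, and invokes the match with (node, param, value) pulled from varargs. Below is a standalone user-space sketch of that dispatch pattern; the tables, selector value, and return value are purely illustrative:]

	#include <stdarg.h>
	#include <stdio.h>
	#include <errno.h>

	typedef long (*feature_call)(void *node, long param, long value);

	struct feature_entry {
		unsigned int selector;
		feature_call function;
	};

	static long get_mb_info(void *node, long param, long value)
	{
		(void)node; (void)param; (void)value;
		return 42;				/* stand-in result */
	}

	/* Per-board table searched first, generic table as fallback */
	static struct feature_entry board_features[] = { { 0, NULL } };
	static struct feature_entry any_features[] = {
		{ 1, get_mb_info },
		{ 0, NULL }
	};

	static long do_feature_call(unsigned int selector, ...)
	{
		feature_call func = NULL;
		va_list args;
		void *node;
		long param, value;
		int i;

		for (i = 0; board_features[i].function && !func; i++)
			if (board_features[i].selector == selector)
				func = board_features[i].function;
		for (i = 0; any_features[i].function && !func; i++)
			if (any_features[i].selector == selector)
				func = any_features[i].function;
		if (!func)
			return -ENODEV;

		va_start(args, selector);
		node = va_arg(args, void *);
		param = va_arg(args, long);
		value = va_arg(args, long);
		va_end(args);

		return func(node, param, value);
	}

	int main(void)
	{
		printf("%ld\n", do_feature_call(1, (void *)0, 0L, 0L));
		return 0;
	}
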
diff --git a/arch/ppc64/kernel/pmac_low_i2c.c b/arch/ppc64/kernel/pmac_low_i2c.c
deleted file mode 100644
index f3f39e8e337a..000000000000
--- a/arch/ppc64/kernel/pmac_low_i2c.c
+++ /dev/null
@@ -1,523 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_low_i2c.c
3 *
4 * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * This file contains some low-level i2c access routines that
12 * need to be used by various bits of the PowerMac platform code
13 * at times where the real asynchronous & interrupt driven driver
14 * cannot be used. The API borrows some semantics from the darwin
15 * driver in order to ease the implementation of the platform
16 * properties parser
17 */
18
19#undef DEBUG
20
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/adb.h>
27#include <linux/pmu.h>
28#include <asm/keylargo.h>
29#include <asm/uninorth.h>
30#include <asm/io.h>
31#include <asm/prom.h>
32#include <asm/machdep.h>
33#include <asm/pmac_low_i2c.h>
34
35#define MAX_LOW_I2C_HOST 4
36
37#ifdef DEBUG
38#define DBG(x...) do {\
39 printk(KERN_DEBUG "KW:" x); \
40 } while(0)
41#else
42#define DBG(x...)
43#endif
44
45struct low_i2c_host;
46
47typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len);
48
49struct low_i2c_host
50{
51 struct device_node *np; /* OF device node */
52 struct semaphore mutex; /* Access mutex for use by i2c-keywest */
53 low_i2c_func_t func; /* Access function */
54 unsigned int is_open : 1; /* Poor man's access control */
55 int mode; /* Current mode */
56 int channel; /* Current channel */
57 int num_channels; /* Number of channels */
58 void __iomem *base; /* For keywest-i2c, base address */
59 int bsteps; /* And register stepping */
60 int speed; /* And speed */
61};
62
63static struct low_i2c_host low_i2c_hosts[MAX_LOW_I2C_HOST];
64
65/* No locking is necessary on allocation, we are running way before
66 * anything can race with us
67 */
68static struct low_i2c_host *find_low_i2c_host(struct device_node *np)
69{
70 int i;
71
72 for (i = 0; i < MAX_LOW_I2C_HOST; i++)
73 if (low_i2c_hosts[i].np == np)
74 return &low_i2c_hosts[i];
75 return NULL;
76}
77
78/*
79 *
80 * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's)
81 *
82 */
83
84/*
85 * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h,
86 * should be moved somewhere in include/asm-ppc/
87 */
88/* Register indices */
89typedef enum {
90 reg_mode = 0,
91 reg_control,
92 reg_status,
93 reg_isr,
94 reg_ier,
95 reg_addr,
96 reg_subaddr,
97 reg_data
98} reg_t;
99
100
101/* Mode register */
102#define KW_I2C_MODE_100KHZ 0x00
103#define KW_I2C_MODE_50KHZ 0x01
104#define KW_I2C_MODE_25KHZ 0x02
105#define KW_I2C_MODE_DUMB 0x00
106#define KW_I2C_MODE_STANDARD 0x04
107#define KW_I2C_MODE_STANDARDSUB 0x08
108#define KW_I2C_MODE_COMBINED 0x0C
109#define KW_I2C_MODE_MODE_MASK 0x0C
110#define KW_I2C_MODE_CHAN_MASK 0xF0
111
112/* Control register */
113#define KW_I2C_CTL_AAK 0x01
114#define KW_I2C_CTL_XADDR 0x02
115#define KW_I2C_CTL_STOP 0x04
116#define KW_I2C_CTL_START 0x08
117
118/* Status register */
119#define KW_I2C_STAT_BUSY 0x01
120#define KW_I2C_STAT_LAST_AAK 0x02
121#define KW_I2C_STAT_LAST_RW 0x04
122#define KW_I2C_STAT_SDA 0x08
123#define KW_I2C_STAT_SCL 0x10
124
125/* IER & ISR registers */
126#define KW_I2C_IRQ_DATA 0x01
127#define KW_I2C_IRQ_ADDR 0x02
128#define KW_I2C_IRQ_STOP 0x04
129#define KW_I2C_IRQ_START 0x08
130#define KW_I2C_IRQ_MASK 0x0F
131
132/* State machine states */
133enum {
134 state_idle,
135 state_addr,
136 state_read,
137 state_write,
138 state_stop,
139 state_dead
140};
141
142#define WRONG_STATE(name) do {\
143 printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \
144 name, __kw_state_names[state], isr); \
145 } while(0)
146
147static const char *__kw_state_names[] = {
148 "state_idle",
149 "state_addr",
150 "state_read",
151 "state_write",
152 "state_stop",
153 "state_dead"
154};
155
156static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg)
157{
158 return readb(host->base + (((unsigned int)reg) << host->bsteps));
159}
160
161static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val)
162{
163 writeb(val, host->base + (((unsigned)reg) << host->bsteps));
164 (void)__kw_read_reg(host, reg_subaddr);
165}
166
167#define kw_write_reg(reg, val) __kw_write_reg(host, reg, val)
168#define kw_read_reg(reg) __kw_read_reg(host, reg)
169
170
171/* Don't schedule, the g5 fan controller is too
172 * timing sensitive
173 */
174static u8 kw_wait_interrupt(struct low_i2c_host* host)
175{
176 int i, j;
177 u8 isr;
178
179 for (i = 0; i < 100000; i++) {
180 isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK;
181 if (isr != 0)
182 return isr;
183
184 /* This code is used with the timebase frozen, we cannot rely
185 * on udelay ! For now, just use a bogus loop
186 */
187 for (j = 1; j < 10000; j++)
188 mb();
189 }
190 return isr;
191}
192
193static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr)
194{
195 u8 ack;
196
197 DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr);
198
199 if (isr == 0) {
200 if (state != state_stop) {
201 DBG("KW: Timeout !\n");
202 *rc = -EIO;
203 goto stop;
204 }
205 if (state == state_stop) {
206 ack = kw_read_reg(reg_status);
207 if (!(ack & KW_I2C_STAT_BUSY)) {
208 state = state_idle;
209 kw_write_reg(reg_ier, 0x00);
210 }
211 }
212 return state;
213 }
214
215 if (isr & KW_I2C_IRQ_ADDR) {
216 ack = kw_read_reg(reg_status);
217 if (state != state_addr) {
218 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
219 WRONG_STATE("KW_I2C_IRQ_ADDR");
220 *rc = -EIO;
221 goto stop;
222 }
223 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
224 *rc = -ENODEV;
225 DBG("KW: NAK on address\n");
226 return state_stop;
227 } else {
228 if (rw) {
229 state = state_read;
230 if (*len > 1)
231 kw_write_reg(reg_control, KW_I2C_CTL_AAK);
232 } else {
233 state = state_write;
234 kw_write_reg(reg_data, **data);
235 (*data)++; (*len)--;
236 }
237 }
238 kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR);
239 }
240
241 if (isr & KW_I2C_IRQ_DATA) {
242 if (state == state_read) {
243 **data = kw_read_reg(reg_data);
244 (*data)++; (*len)--;
245 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
246 if ((*len) == 0)
247 state = state_stop;
248 else if ((*len) == 1)
249 kw_write_reg(reg_control, 0);
250 } else if (state == state_write) {
251 ack = kw_read_reg(reg_status);
252 if ((ack & KW_I2C_STAT_LAST_AAK) == 0) {
253 DBG("KW: nack on data write\n");
254 *rc = -EIO;
255 goto stop;
256 } else if (*len) {
257 kw_write_reg(reg_data, **data);
258 (*data)++; (*len)--;
259 } else {
260 kw_write_reg(reg_control, KW_I2C_CTL_STOP);
261 state = state_stop;
262 *rc = 0;
263 }
264 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
265 } else {
266 kw_write_reg(reg_isr, KW_I2C_IRQ_DATA);
267 WRONG_STATE("KW_I2C_IRQ_DATA");
268 if (state != state_stop) {
269 *rc = -EIO;
270 goto stop;
271 }
272 }
273 }
274
275 if (isr & KW_I2C_IRQ_STOP) {
276 kw_write_reg(reg_isr, KW_I2C_IRQ_STOP);
277 if (state != state_stop) {
278 WRONG_STATE("KW_I2C_IRQ_STOP");
279 *rc = -EIO;
280 }
281 return state_idle;
282 }
283
284 if (isr & KW_I2C_IRQ_START)
285 kw_write_reg(reg_isr, KW_I2C_IRQ_START);
286
287 return state;
288
289 stop:
290 kw_write_reg(reg_control, KW_I2C_CTL_STOP);
291 return state_stop;
292}
293
294static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len)
295{
296 u8 mode_reg = host->speed;
297 int state = state_addr;
298 int rc = 0;
299
300 /* Setup mode & subaddress if any */
301 switch(host->mode) {
302 case pmac_low_i2c_mode_dumb:
303 printk(KERN_ERR "low_i2c: Dumb mode not supported !\n");
304 return -EINVAL;
305 case pmac_low_i2c_mode_std:
306 mode_reg |= KW_I2C_MODE_STANDARD;
307 break;
308 case pmac_low_i2c_mode_stdsub:
309 mode_reg |= KW_I2C_MODE_STANDARDSUB;
310 break;
311 case pmac_low_i2c_mode_combined:
312 mode_reg |= KW_I2C_MODE_COMBINED;
313 break;
314 }
315
316 /* Setup channel & clear pending irqs */
317 kw_write_reg(reg_isr, kw_read_reg(reg_isr));
318 kw_write_reg(reg_mode, mode_reg | (host->channel << 4));
319 kw_write_reg(reg_status, 0);
320
321 /* Set up address and r/w bit */
322 kw_write_reg(reg_addr, addr);
323
324 /* Set up the sub address */
325 if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB
326 || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED)
327 kw_write_reg(reg_subaddr, subaddr);
328
329 /* Start sending address & disable interrupt*/
330 kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/);
331 kw_write_reg(reg_control, KW_I2C_CTL_XADDR);
332
333 /* State machine, to turn into an interrupt handler */
334 while(state != state_idle) {
335 u8 isr = kw_wait_interrupt(host);
336 state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr);
337 }
338
339 return rc;
340}
341
342static void keywest_low_i2c_add(struct device_node *np)
343{
344 struct low_i2c_host *host = find_low_i2c_host(NULL);
345 u32 *psteps, *prate, steps, aoffset = 0;
346 struct device_node *parent;
347
348 if (host == NULL) {
349 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
350 np->full_name);
351 return;
352 }
353 memset(host, 0, sizeof(*host));
354
355 init_MUTEX(&host->mutex);
356 host->np = of_node_get(np);
357 psteps = (u32 *)get_property(np, "AAPL,address-step", NULL);
358 steps = psteps ? (*psteps) : 0x10;
359 for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++)
360 steps >>= 1;
361 parent = of_get_parent(np);
362 host->num_channels = 1;
363 if (parent && parent->name[0] == 'u') {
364 host->num_channels = 2;
365 aoffset = 3;
366 }
367 /* Select interface rate */
368 host->speed = KW_I2C_MODE_100KHZ;
369 prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL);
370 if (prate) switch(*prate) {
371 case 100:
372 host->speed = KW_I2C_MODE_100KHZ;
373 break;
374 case 50:
375 host->speed = KW_I2C_MODE_50KHZ;
376 break;
377 case 25:
378 host->speed = KW_I2C_MODE_25KHZ;
379 break;
380 }
381
382 host->mode = pmac_low_i2c_mode_std;
383 host->base = ioremap(np->addrs[0].address + aoffset,
384 np->addrs[0].size);
385 host->func = keywest_low_i2c_func;
386}
387
388/*
389 *
390 * PMU implementation
391 *
392 */
393
394
395#ifdef CONFIG_ADB_PMU
396
397static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len)
398{
399 // TODO
400 return -ENODEV;
401}
402
403static void pmu_low_i2c_add(struct device_node *np)
404{
405 struct low_i2c_host *host = find_low_i2c_host(NULL);
406
407 if (host == NULL) {
408 printk(KERN_ERR "low_i2c: Can't allocate host for %s\n",
409 np->full_name);
410 return;
411 }
412 memset(host, 0, sizeof(*host));
413
414 init_MUTEX(&host->mutex);
415 host->np = of_node_get(np);
416 host->num_channels = 3;
417 host->mode = pmac_low_i2c_mode_std;
418 host->func = pmu_low_i2c_func;
419}
420
421#endif /* CONFIG_ADB_PMU */
422
423void __init pmac_init_low_i2c(void)
424{
425 struct device_node *np;
426
427 /* Probe keywest-i2c busses */
428 np = of_find_compatible_node(NULL, "i2c", "keywest-i2c");
429 while(np) {
430 keywest_low_i2c_add(np);
431 np = of_find_compatible_node(np, "i2c", "keywest-i2c");
432 }
433
434#ifdef CONFIG_ADB_PMU
435 /* Probe PMU busses */
436 np = of_find_node_by_name(NULL, "via-pmu");
437 if (np)
438 pmu_low_i2c_add(np);
439#endif /* CONFIG_ADB_PMU */
440
441 /* TODO: Add CUDA support as well */
442}
443
444int pmac_low_i2c_lock(struct device_node *np)
445{
446 struct low_i2c_host *host = find_low_i2c_host(np);
447
448 if (!host)
449 return -ENODEV;
450 down(&host->mutex);
451 return 0;
452}
453EXPORT_SYMBOL(pmac_low_i2c_lock);
454
455int pmac_low_i2c_unlock(struct device_node *np)
456{
457 struct low_i2c_host *host = find_low_i2c_host(np);
458
459 if (!host)
460 return -ENODEV;
461 up(&host->mutex);
462 return 0;
463}
464EXPORT_SYMBOL(pmac_low_i2c_unlock);
465
466
467int pmac_low_i2c_open(struct device_node *np, int channel)
468{
469 struct low_i2c_host *host = find_low_i2c_host(np);
470
471 if (!host)
472 return -ENODEV;
473
474 if (channel >= host->num_channels)
475 return -EINVAL;
476
477 down(&host->mutex);
478 host->is_open = 1;
479 host->channel = channel;
480
481 return 0;
482}
483EXPORT_SYMBOL(pmac_low_i2c_open);
484
485int pmac_low_i2c_close(struct device_node *np)
486{
487 struct low_i2c_host *host = find_low_i2c_host(np);
488
489 if (!host)
490 return -ENODEV;
491
492 host->is_open = 0;
493 up(&host->mutex);
494
495 return 0;
496}
497EXPORT_SYMBOL(pmac_low_i2c_close);
498
499int pmac_low_i2c_setmode(struct device_node *np, int mode)
500{
501 struct low_i2c_host *host = find_low_i2c_host(np);
502
503 if (!host)
504 return -ENODEV;
505 WARN_ON(!host->is_open);
506 host->mode = mode;
507
508 return 0;
509}
510EXPORT_SYMBOL(pmac_low_i2c_setmode);
511
512int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len)
513{
514 struct low_i2c_host *host = find_low_i2c_host(np);
515
516 if (!host)
517 return -ENODEV;
518 WARN_ON(!host->is_open);
519
520 return host->func(host, addrdir, subaddr, data, len);
521}
522EXPORT_SYMBOL(pmac_low_i2c_xfer);
523
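
[Editor's note: a hypothetical caller of the exported low-i2c API deleted above, showing the intended open/setmode/xfer/close sequence. The function name, channel number, and 0x52 device address are made up for illustration; addrdir carries the 7-bit address shifted left, with the low bit set for a read:]

	static int read_some_sensor(struct device_node *np)
	{
		u8 buf[2];
		int rc;

		rc = pmac_low_i2c_open(np, 0);		/* takes the host mutex */
		if (rc)
			return rc;
		pmac_low_i2c_setmode(np, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(np, (0x52 << 1) | 1, 0, buf, 2);
		pmac_low_i2c_close(np);			/* drops the mutex */
		return rc;
	}
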
diff --git a/arch/ppc64/kernel/pmac_nvram.c b/arch/ppc64/kernel/pmac_nvram.c
deleted file mode 100644
index e32a902236e3..000000000000
--- a/arch/ppc64/kernel/pmac_nvram.c
+++ /dev/null
@@ -1,495 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_nvram.c
3 *
4 * Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Todo: - add support for the OF persistent properties
12 */
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/stddef.h>
17#include <linux/string.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/errno.h>
22#include <linux/bootmem.h>
23#include <linux/completion.h>
24#include <linux/spinlock.h>
25#include <asm/sections.h>
26#include <asm/io.h>
27#include <asm/system.h>
28#include <asm/prom.h>
29#include <asm/machdep.h>
30#include <asm/nvram.h>
31
32#define DEBUG
33
34#ifdef DEBUG
35#define DBG(x...) printk(x)
36#else
37#define DBG(x...)
38#endif
39
40#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
41
42#define CORE99_SIGNATURE 0x5a
43#define CORE99_ADLER_START 0x14
44
45/* On Core99, nvram is either a sharp, a micron or an AMD flash */
46#define SM_FLASH_STATUS_DONE 0x80
47#define SM_FLASH_STATUS_ERR 0x38
48
49#define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
50#define SM_FLASH_CMD_ERASE_SETUP 0x20
51#define SM_FLASH_CMD_RESET 0xff
52#define SM_FLASH_CMD_WRITE_SETUP 0x40
53#define SM_FLASH_CMD_CLEAR_STATUS 0x50
54#define SM_FLASH_CMD_READ_STATUS 0x70
55
56/* CHRP NVRAM header */
57struct chrp_header {
58 u8 signature;
59 u8 cksum;
60 u16 len;
61 char name[12];
62 u8 data[0];
63};
64
65struct core99_header {
66 struct chrp_header hdr;
67 u32 adler;
68 u32 generation;
69 u32 reserved[2];
70};
71
72/*
73 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
74 */
75static volatile unsigned char *nvram_data;
76static int core99_bank = 0;
77// XXX Turn that into a sem
78static DEFINE_SPINLOCK(nv_lock);
79
80extern int system_running;
81
82static int (*core99_write_bank)(int bank, u8* datas);
83static int (*core99_erase_bank)(int bank);
84
85static char *nvram_image __pmacdata;
86
87
88static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
89{
90 int i;
91
92 if (nvram_image == NULL)
93 return -ENODEV;
94 if (*index > NVRAM_SIZE)
95 return 0;
96
97 i = *index;
98 if (i + count > NVRAM_SIZE)
99 count = NVRAM_SIZE - i;
100
101 memcpy(buf, &nvram_image[i], count);
102 *index = i + count;
103 return count;
104}
105
106static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
107{
108 int i;
109
110 if (nvram_image == NULL)
111 return -ENODEV;
112 if (*index > NVRAM_SIZE)
113 return 0;
114
115 i = *index;
116 if (i + count > NVRAM_SIZE)
117 count = NVRAM_SIZE - i;
118
119 memcpy(&nvram_image[i], buf, count);
120 *index = i + count;
121 return count;
122}
123
124static ssize_t __pmac core99_nvram_size(void)
125{
126 if (nvram_image == NULL)
127 return -ENODEV;
128 return NVRAM_SIZE;
129}
130
131static u8 __pmac chrp_checksum(struct chrp_header* hdr)
132{
133 u8 *ptr;
134 u16 sum = hdr->signature;
135 for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
136 sum += *ptr;
137 while (sum > 0xFF)
138 sum = (sum & 0xFF) + (sum>>8);
139 return sum;
140}
141
142static u32 __pmac core99_calc_adler(u8 *buffer)
143{
144 int cnt;
145 u32 low, high;
146
147 buffer += CORE99_ADLER_START;
148 low = 1;
149 high = 0;
150 for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
151 if ((cnt % 5000) == 0) {
152 low %= 65521UL;
153 high %= 65521UL;
154 }
155 low += buffer[cnt];
156 high += low;
157 }
158 low %= 65521UL;
159 high %= 65521UL;
160
161 return (high << 16) | low;
162}
163
164static u32 __pmac core99_check(u8* datas)
165{
166 struct core99_header* hdr99 = (struct core99_header*)datas;
167
168 if (hdr99->hdr.signature != CORE99_SIGNATURE) {
169 DBG("Invalid signature\n");
170 return 0;
171 }
172 if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
173 DBG("Invalid checksum\n");
174 return 0;
175 }
176 if (hdr99->adler != core99_calc_adler(datas)) {
177 DBG("Invalid adler\n");
178 return 0;
179 }
180 return hdr99->generation;
181}
182
183static int __pmac sm_erase_bank(int bank)
184{
185 int stat, i;
186 unsigned long timeout;
187
188 u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
189
190 DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
191
192 out_8(base, SM_FLASH_CMD_ERASE_SETUP);
193 out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
194 timeout = 0;
195 do {
196 if (++timeout > 1000000) {
197 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
198 break;
199 }
200 out_8(base, SM_FLASH_CMD_READ_STATUS);
201 stat = in_8(base);
202 } while (!(stat & SM_FLASH_STATUS_DONE));
203
204 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
205 out_8(base, SM_FLASH_CMD_RESET);
206
207 for (i=0; i<NVRAM_SIZE; i++)
208 if (base[i] != 0xff) {
209 printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
210 return -ENXIO;
211 }
212 return 0;
213}
214
215static int __pmac sm_write_bank(int bank, u8* datas)
216{
217 int i, stat = 0;
218 unsigned long timeout;
219
220 u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
221
222 DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
223
224 for (i=0; i<NVRAM_SIZE; i++) {
225 out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
226 udelay(1);
227 out_8(base+i, datas[i]);
228 timeout = 0;
229 do {
230 if (++timeout > 1000000) {
231 printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
232 break;
233 }
234 out_8(base, SM_FLASH_CMD_READ_STATUS);
235 stat = in_8(base);
236 } while (!(stat & SM_FLASH_STATUS_DONE));
237 if (!(stat & SM_FLASH_STATUS_DONE))
238 break;
239 }
240 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
241 out_8(base, SM_FLASH_CMD_RESET);
242 for (i=0; i<NVRAM_SIZE; i++)
243 if (base[i] != datas[i]) {
244 printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
245 return -ENXIO;
246 }
247 return 0;
248}
249
250static int __pmac amd_erase_bank(int bank)
251{
252 int i, stat = 0;
253 unsigned long timeout;
254
255 u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
256
257 DBG("nvram: AMD Erasing bank %d...\n", bank);
258
259 /* Unlock 1 */
260 out_8(base+0x555, 0xaa);
261 udelay(1);
262 /* Unlock 2 */
263 out_8(base+0x2aa, 0x55);
264 udelay(1);
265
266 /* Sector-Erase */
267 out_8(base+0x555, 0x80);
268 udelay(1);
269 out_8(base+0x555, 0xaa);
270 udelay(1);
271 out_8(base+0x2aa, 0x55);
272 udelay(1);
273 out_8(base, 0x30);
274 udelay(1);
275
276 timeout = 0;
277 do {
278 if (++timeout > 1000000) {
279 printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
280 break;
281 }
282 stat = in_8(base) ^ in_8(base);
283 } while (stat != 0);
284
285 /* Reset */
286 out_8(base, 0xf0);
287 udelay(1);
288
289 for (i=0; i<NVRAM_SIZE; i++)
290 if (base[i] != 0xff) {
291 printk(KERN_ERR "nvram: AMD flash erase failed !\n");
292 return -ENXIO;
293 }
294 return 0;
295}
296
297static int __pmac amd_write_bank(int bank, u8* datas)
298{
299 int i, stat = 0;
300 unsigned long timeout;
301
302 u8* base = (u8 *)nvram_data + bank*NVRAM_SIZE;
303
304 DBG("nvram: AMD Writing bank %d...\n", bank);
305
306 for (i=0; i<NVRAM_SIZE; i++) {
307 /* Unlock 1 */
308 out_8(base+0x555, 0xaa);
309 udelay(1);
310 /* Unlock 2 */
311 out_8(base+0x2aa, 0x55);
312 udelay(1);
313
314 /* Write single word */
315 out_8(base+0x555, 0xa0);
316 udelay(1);
317 out_8(base+i, datas[i]);
318
319 timeout = 0;
320 do {
321 if (++timeout > 1000000) {
322 printk(KERN_ERR "nvram: AMD flash write timeout !\n");
323 break;
324 }
325 stat = in_8(base) ^ in_8(base);
326 } while (stat != 0);
327 if (stat != 0)
328 break;
329 }
330
331 /* Reset */
332 out_8(base, 0xf0);
333 udelay(1);
334
335 for (i=0; i<NVRAM_SIZE; i++)
336 if (base[i] != datas[i]) {
337 printk(KERN_ERR "nvram: AMD flash write failed !\n");
338 return -ENXIO;
339 }
340 return 0;
341}
342
343
344static int __pmac core99_nvram_sync(void)
345{
346 struct core99_header* hdr99;
347 unsigned long flags;
348
349 spin_lock_irqsave(&nv_lock, flags);
350 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
351 NVRAM_SIZE))
352 goto bail;
353
354 DBG("Updating nvram...\n");
355
356 hdr99 = (struct core99_header*)nvram_image;
357 hdr99->generation++;
358 hdr99->hdr.signature = CORE99_SIGNATURE;
359 hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
360 hdr99->adler = core99_calc_adler(nvram_image);
361 core99_bank = core99_bank ? 0 : 1;
362 if (core99_erase_bank)
363 if (core99_erase_bank(core99_bank)) {
364 printk("nvram: Error erasing bank %d\n", core99_bank);
365 goto bail;
366 }
367 if (core99_write_bank)
368 if (core99_write_bank(core99_bank, nvram_image))
369 printk("nvram: Error writing bank %d\n", core99_bank);
370 bail:
371 spin_unlock_irqrestore(&nv_lock, flags);
372
373 return 0;
374}
375
376int __init pmac_nvram_init(void)
377{
378 struct device_node *dp;
379 u32 gen_bank0, gen_bank1;
380 int i;
381
382 dp = find_devices("nvram");
383 if (dp == NULL) {
384 printk(KERN_ERR "Can't find NVRAM device\n");
385 return -ENODEV;
386 }
387 if (!device_is_compatible(dp, "nvram,flash")) {
388 printk(KERN_ERR "Incompatible type of NVRAM\n");
389 return -ENXIO;
390 }
391
392 nvram_image = alloc_bootmem(NVRAM_SIZE);
393 if (nvram_image == NULL) {
394 printk(KERN_ERR "nvram: can't allocate ram image\n");
395 return -ENOMEM;
396 }
397 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
398
399 DBG("nvram: Checking bank 0...\n");
400
401 gen_bank0 = core99_check((u8 *)nvram_data);
402 gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
403 core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
404
405 DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
406 DBG("nvram: Active bank is: %d\n", core99_bank);
407
408 for (i=0; i<NVRAM_SIZE; i++)
409 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
410
411 ppc_md.nvram_read = core99_nvram_read;
412 ppc_md.nvram_write = core99_nvram_write;
413 ppc_md.nvram_size = core99_nvram_size;
414 ppc_md.nvram_sync = core99_nvram_sync;
415
416 /*
417 * Maybe we could be smarter here, though making an exclusive list
418 * of known flash chips is a bit nasty as older OF didn't provide us
419 * with a useful "compatible" entry. A solution would be to really
420 * identify the chip using flash id commands and base ourselves on
421 * a list of known chips IDs
422 */
423 if (device_is_compatible(dp, "amd-0137")) {
424 core99_erase_bank = amd_erase_bank;
425 core99_write_bank = amd_write_bank;
426 } else {
427 core99_erase_bank = sm_erase_bank;
428 core99_write_bank = sm_write_bank;
429 }
430
431 return 0;
432}
433
434int __pmac pmac_get_partition(int partition)
435{
436 struct nvram_partition *part;
437 const char *name;
438 int sig;
439
440 switch(partition) {
441 case pmac_nvram_OF:
442 name = "common";
443 sig = NVRAM_SIG_SYS;
444 break;
445 case pmac_nvram_XPRAM:
446 name = "APL,MacOS75";
447 sig = NVRAM_SIG_OS;
448 break;
449 case pmac_nvram_NR:
450 default:
451 /* Oldworld stuff */
452 return -ENODEV;
453 }
454
455 part = nvram_find_partition(sig, name);
456 if (part == NULL)
457 return 0;
458
459 return part->index;
460}
461
462u8 __pmac pmac_xpram_read(int xpaddr)
463{
464 int offset = pmac_get_partition(pmac_nvram_XPRAM);
465 loff_t index;
466 u8 buf;
467 ssize_t count;
468
469 if (offset < 0 || xpaddr < 0 || xpaddr >= 0x100)
470 return 0xff;
471 index = offset + xpaddr;
472
473 count = ppc_md.nvram_read(&buf, 1, &index);
474 if (count != 1)
475 return 0xff;
476 return buf;
477}
478
479void __pmac pmac_xpram_write(int xpaddr, u8 data)
480{
481 int offset = pmac_get_partition(pmac_nvram_XPRAM);
482 loff_t index;
483 u8 buf;
484
485 if (offset < 0 || xpaddr < 0 || xpaddr >= 0x100)
486 return;
487 index = offset + xpaddr;
488 buf = data;
489
490 ppc_md.nvram_write(&buf, 1, &index);
491}
492
493EXPORT_SYMBOL(pmac_get_partition);
494EXPORT_SYMBOL(pmac_xpram_read);
495EXPORT_SYMBOL(pmac_xpram_write);
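
[Editor's note: core99_nvram_sync() above only flashes a bank when the RAM image has changed, bumping the generation count and recomputing the Adler-32 style checksum that core99_check() validates at boot. Below is a self-contained user-space rendition of that checksum (modulo deferred to every 5000 bytes, header skipped up to CORE99_ADLER_START); the test image contents are arbitrary:]

	#include <stdio.h>
	#include <string.h>

	#define NVRAM_SIZE		0x2000
	#define CORE99_ADLER_START	0x14

	static unsigned int calc_adler(const unsigned char *buffer)
	{
		unsigned int low = 1, high = 0;
		int cnt;

		buffer += CORE99_ADLER_START;		/* checksum skips the header */
		for (cnt = 0; cnt < NVRAM_SIZE - CORE99_ADLER_START; cnt++) {
			if ((cnt % 5000) == 0) {	/* defer the expensive modulo */
				low %= 65521U;
				high %= 65521U;
			}
			low += buffer[cnt];
			high += low;
		}
		low %= 65521U;
		high %= 65521U;

		return (high << 16) | low;
	}

	int main(void)
	{
		unsigned char image[NVRAM_SIZE];

		memset(image, 0x5a, sizeof(image));	/* arbitrary test contents */
		printf("adler = %#x\n", calc_adler(image));
		return 0;
	}
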
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
deleted file mode 100644
index dc40a0cad0b4..000000000000
--- a/arch/ppc64/kernel/pmac_pci.c
+++ /dev/null
@@ -1,793 +0,0 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 * At present the "bandit" and "chaos" bridges are supported.
4 * Fortunately you access configuration space in the same
5 * way with either bridge.
6 *
7 * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
8 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/kernel.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/bootmem.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/iommu.h>
30
31#include "pci.h"
32#include "pmac.h"
33
34#define DEBUG
35
36#ifdef DEBUG
37#define DBG(x...) printk(x)
38#else
39#define DBG(x...)
40#endif
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45static struct pci_controller *u3_agp;
46struct device_node *k2_skiplist[2];
47
48static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
49{
50 for (; node != 0;node = node->sibling) {
51 int * bus_range;
52 unsigned int *class_code;
53 int len;
54
55 /* For PCI<->PCI bridges or CardBus bridges, we go down */
56 class_code = (unsigned int *) get_property(node, "class-code", NULL);
57 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
58 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
59 continue;
60 bus_range = (int *) get_property(node, "bus-range", &len);
61		if (bus_range != NULL && len >= 2 * sizeof(int)) {
62 if (bus_range[1] > higher)
63 higher = bus_range[1];
64 }
65 higher = fixup_one_level_bus_range(node->child, higher);
66 }
67 return higher;
68}
69
70/* This routine fixes the "bus-range" property of all bridges in the
71 * system since they tend to have their "last" member wrong on macs
72 *
73 * Note that the bus numbers manipulated here are OF bus numbers, they
74 * are not Linux bus numbers.
75 */
76static void __init fixup_bus_range(struct device_node *bridge)
77{
78 int * bus_range;
79 int len;
80
81 /* Lookup the "bus-range" property for the hose */
82 bus_range = (int *) get_property(bridge, "bus-range", &len);
83 if (bus_range == NULL || len < 2 * sizeof(int)) {
84 printk(KERN_WARNING "Can't get bus-range for %s\n",
85 bridge->full_name);
86 return;
87 }
88 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
89}
90
91/*
92 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
93 *
94 * The "Bandit" version is present in all early PCI PowerMacs,
95 * and up to the first ones using Grackle. Some machines may
96 * have 2 bandit controllers (2 PCI busses).
97 *
98 * "Chaos" is used in some "Bandit"-type machines as a bridge
99 * for the separate display bus. It is accessed the same
100 * way as bandit, but cannot be probed for devices. It therefore
101 * has its own config access functions.
102 *
103 * The "UniNorth" version is present in all Core99 machines
104 * (iBook, G4, new iMacs, and all the recent Apple machines).
105 * It contains 3 controllers in one ASIC.
106 *
107 * The U3 is the bridge used on G5 machines. It contains an
108 * AGP bus, which is handled by the old UniNorth access routines,
109 * and a HyperTransport bus which uses its own set of access
110 * functions.
111 */
112
113#define MACRISC_CFA0(devfn, off)	\
114	((1 << (unsigned long)PCI_SLOT(devfn)) \
115	| (((unsigned long)PCI_FUNC(devfn)) << 8) \
116	| (((unsigned long)(off)) & 0xFCUL))
117
118#define MACRISC_CFA1(bus, devfn, off) \
119 ((((unsigned long)(bus)) << 16) \
120 |(((unsigned long)(devfn)) << 8) \
121 |(((unsigned long)(off)) & 0xFCUL) \
122 |1UL)
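/* Worked example (illustrative): a config access to slot 12, function 0,
 * offset 0x40 has devfn = (12 << 3) | 0 = 0x60. On the root bus this is a
 * type-0 cycle, MACRISC_CFA0 = (1 << 12) | (0 << 8) | 0x40 = 0x1040 -- the
 * slot selects a one-hot bit rather than a bit-field. Behind a bridge on
 * bus 2 it becomes a type-1 cycle, MACRISC_CFA1 = (2 << 16) | (0x60 << 8)
 * | 0x40 | 1 = 0x26041.
 */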
123
124static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
125 u8 bus, u8 dev_fn, u8 offset)
126{
127 unsigned int caddr;
128
129 if (bus == hose->first_busno) {
130 if (dev_fn < (11 << 3))
131 return 0;
132 caddr = MACRISC_CFA0(dev_fn, offset);
133 } else
134 caddr = MACRISC_CFA1(bus, dev_fn, offset);
135
136	/* UniNorth will return garbage if we don't read back the value! */
137 do {
138 out_le32(hose->cfg_addr, caddr);
139 } while (in_le32(hose->cfg_addr) != caddr);
140
141 offset &= has_uninorth ? 0x07 : 0x03;
142 return ((unsigned long)hose->cfg_data) + offset;
143}
144
145static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
146 int offset, int len, u32 *val)
147{
148 struct pci_controller *hose;
149 unsigned long addr;
150
151 hose = pci_bus_to_host(bus);
152 if (hose == NULL)
153 return PCIBIOS_DEVICE_NOT_FOUND;
154
155 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
156 if (!addr)
157 return PCIBIOS_DEVICE_NOT_FOUND;
158 /*
159 * Note: the caller has already checked that offset is
160 * suitably aligned and that len is 1, 2 or 4.
161 */
162 switch (len) {
163 case 1:
164 *val = in_8((u8 *)addr);
165 break;
166 case 2:
167 *val = in_le16((u16 *)addr);
168 break;
169 default:
170 *val = in_le32((u32 *)addr);
171 break;
172 }
173 return PCIBIOS_SUCCESSFUL;
174}
175
176static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
177 int offset, int len, u32 val)
178{
179 struct pci_controller *hose;
180 unsigned long addr;
181
182 hose = pci_bus_to_host(bus);
183 if (hose == NULL)
184 return PCIBIOS_DEVICE_NOT_FOUND;
185
186 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
187 if (!addr)
188 return PCIBIOS_DEVICE_NOT_FOUND;
189 /*
190 * Note: the caller has already checked that offset is
191 * suitably aligned and that len is 1, 2 or 4.
192 */
193 switch (len) {
194 case 1:
195 out_8((u8 *)addr, val);
196 (void) in_8((u8 *)addr);
197 break;
198 case 2:
199 out_le16((u16 *)addr, val);
200 (void) in_le16((u16 *)addr);
201 break;
202 default:
203 out_le32((u32 *)addr, val);
204 (void) in_le32((u32 *)addr);
205 break;
206 }
207 return PCIBIOS_SUCCESSFUL;
208}
209
210static struct pci_ops macrisc_pci_ops =
211{
212	.read = macrisc_read_config,
213	.write = macrisc_write_config
214};
215
216/*
217 * These versions of U3 HyperTransport config space access ops do not
218 * implement self-view of the HT host yet
219 */
220
221/*
222 * This function deals with some "special case" devices.
223 *
224 * 0  -> No special case
225 * 1  -> Skip the device but act as if the access was successful
226 *       (return 0xff's on reads; eventually, cache config space
227 *       accesses in a later version)
228 * -1 -> Hide the device (unsuccessful access)
229 */
230static int u3_ht_skip_device(struct pci_controller *hose,
231 struct pci_bus *bus, unsigned int devfn)
232{
233 struct device_node *busdn, *dn;
234 int i;
235
236	/* We only allow config cycles to devices that are in the OF
237	 * device-tree, as we apparently get some weird behaviour from
238	 * some revs of K2 on recent G5s otherwise
239	 */
240 if (bus->self)
241 busdn = pci_device_to_OF_node(bus->self);
242 else
243 busdn = hose->arch_data;
244 for (dn = busdn->child; dn; dn = dn->sibling)
245 if (dn->data && PCI_DN(dn)->devfn == devfn)
246 break;
247 if (dn == NULL)
248 return -1;
249
250 /*
251 * When a device in K2 is powered down, we die on config
252 * cycle accesses. Fix that here.
253 */
254 for (i=0; i<2; i++)
255 if (k2_skiplist[i] == dn)
256 return 1;
257
258 return 0;
259}
260
261#define U3_HT_CFA0(devfn, off)		\
262		((((unsigned long)(devfn)) << 8) | ((unsigned long)(off)))
263#define U3_HT_CFA1(bus, devfn, off) \
264 (U3_HT_CFA0(devfn, off) \
265 + (((unsigned long)bus) << 16) \
266 + 0x01000000UL)
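/* Worked example (illustrative): the same devfn 0x60 at offset 0x40 gives
 * U3_HT_CFA0 = (0x60 << 8) | 0x40 = 0x6040 on the root bus; on bus 2 it
 * becomes U3_HT_CFA1 = 0x6040 + (2 << 16) + 0x01000000 = 0x1026040, the
 * 0x01000000 bit flagging a type-1 cycle.
 */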
267
268static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
269 u8 bus, u8 devfn, u8 offset)
270{
271 if (bus == hose->first_busno) {
272 /* For now, we don't self probe U3 HT bridge */
273 if (PCI_SLOT(devfn) == 0)
274 return 0;
275 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
276 } else
277 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
278}
279
280static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
281 int offset, int len, u32 *val)
282{
283 struct pci_controller *hose;
284 unsigned long addr;
285
286
287 hose = pci_bus_to_host(bus);
288 if (hose == NULL)
289 return PCIBIOS_DEVICE_NOT_FOUND;
290
291 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
292 if (!addr)
293 return PCIBIOS_DEVICE_NOT_FOUND;
294
295 switch (u3_ht_skip_device(hose, bus, devfn)) {
296 case 0:
297 break;
298 case 1:
299 switch (len) {
300 case 1:
301 *val = 0xff; break;
302 case 2:
303 *val = 0xffff; break;
304 default:
305 *val = 0xfffffffful; break;
306 }
307 return PCIBIOS_SUCCESSFUL;
308 default:
309 return PCIBIOS_DEVICE_NOT_FOUND;
310 }
311
312 /*
313 * Note: the caller has already checked that offset is
314 * suitably aligned and that len is 1, 2 or 4.
315 */
316 switch (len) {
317 case 1:
318 *val = in_8((u8 *)addr);
319 break;
320 case 2:
321 *val = in_le16((u16 *)addr);
322 break;
323 default:
324 *val = in_le32((u32 *)addr);
325 break;
326 }
327 return PCIBIOS_SUCCESSFUL;
328}
329
330static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
331 int offset, int len, u32 val)
332{
333 struct pci_controller *hose;
334 unsigned long addr;
335
336 hose = pci_bus_to_host(bus);
337 if (hose == NULL)
338 return PCIBIOS_DEVICE_NOT_FOUND;
339
340 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
341 if (!addr)
342 return PCIBIOS_DEVICE_NOT_FOUND;
343
344 switch (u3_ht_skip_device(hose, bus, devfn)) {
345 case 0:
346 break;
347 case 1:
348 return PCIBIOS_SUCCESSFUL;
349 default:
350 return PCIBIOS_DEVICE_NOT_FOUND;
351 }
352
353 /*
354 * Note: the caller has already checked that offset is
355 * suitably aligned and that len is 1, 2 or 4.
356 */
357 switch (len) {
358 case 1:
359 out_8((u8 *)addr, val);
360 (void) in_8((u8 *)addr);
361 break;
362 case 2:
363 out_le16((u16 *)addr, val);
364 (void) in_le16((u16 *)addr);
365 break;
366 default:
367 out_le32((u32 *)addr, val);
368 (void) in_le32((u32 *)addr);
369 break;
370 }
371 return PCIBIOS_SUCCESSFUL;
372}
373
374static struct pci_ops u3_ht_pci_ops =
375{
376	.read = u3_ht_read_config,
377	.write = u3_ht_write_config
378};
379
380static void __init setup_u3_agp(struct pci_controller* hose)
381{
382 /* On G5, we move AGP up to high bus number so we don't need
383 * to reassign bus numbers for HT. If we ever have P2P bridges
384 * on AGP, we'll have to move pci_assign_all_busses to the
385 * pci_controller structure so we enable it for AGP and not for
386	 * HT children.
387 * We hard code the address because of the different size of
388 * the reg address cell, we shall fix that by killing struct
389 * reg_property and using some accessor functions instead
390 */
391 hose->first_busno = 0xf0;
392 hose->last_busno = 0xff;
393 has_uninorth = 1;
394 hose->ops = &macrisc_pci_ops;
395 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
396 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
397
398 u3_agp = hose;
399}
400
401static void __init setup_u3_ht(struct pci_controller* hose)
402{
403 struct device_node *np = (struct device_node *)hose->arch_data;
404 int i, cur;
405
406 hose->ops = &u3_ht_pci_ops;
407
408 /* We hard code the address because of the different size of
409 * the reg address cell, we shall fix that by killing struct
410 * reg_property and using some accessor functions instead
411 */
412 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
413
414 /*
415 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
416 * have been allocated to AGP. So far, this version of the code doesn't assign
417 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
418 * We need to fix that sooner or later by either parsing all child "ranges"
419 * properties or figuring out the U3 address space decoding logic and
420	 * then reading its configuration register (if any).
421 */
422 hose->io_base_phys = 0xf4000000;
423 hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
424 isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
425 hose->io_resource.name = np->full_name;
426 hose->io_resource.start = 0;
427 hose->io_resource.end = 0x003fffff;
428 hose->io_resource.flags = IORESOURCE_IO;
429 hose->pci_mem_offset = 0;
430 hose->first_busno = 0;
431 hose->last_busno = 0xef;
432 hose->mem_resources[0].name = np->full_name;
433 hose->mem_resources[0].start = 0x80000000;
434 hose->mem_resources[0].end = 0xefffffff;
435 hose->mem_resources[0].flags = IORESOURCE_MEM;
436
437 if (u3_agp == NULL) {
438 DBG("U3 has no AGP, using full resource range\n");
439 return;
440 }
441
442	/* We "remove" the AGP resources from the resources allocated to HT, that
443	 * is we create "holes". However, that code makes assumptions that so far
444	 * happen to be true (fingers crossed...), typically that resources in the
445	 * AGP node are properly ordered
446 */
447 cur = 0;
448 for (i=0; i<3; i++) {
449 struct resource *res = &u3_agp->mem_resources[i];
450 if (res->flags != IORESOURCE_MEM)
451 continue;
452 /* We don't care about "fine" resources */
453 if (res->start >= 0xf0000000)
454 continue;
455 /* Check if it's just a matter of "shrinking" us in one direction */
456 if (hose->mem_resources[cur].start == res->start) {
457 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
458 cur, hose->mem_resources[cur].start, res->end + 1);
459 hose->mem_resources[cur].start = res->end + 1;
460 continue;
461 }
462 if (hose->mem_resources[cur].end == res->end) {
463 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
464 cur, hose->mem_resources[cur].end, res->start - 1);
465 hose->mem_resources[cur].end = res->start - 1;
466 continue;
467 }
468 /* No, it's not the case, we need a hole */
469 if (cur == 2) {
470 /* not enough resources for a hole, we drop part of the range */
471 printk(KERN_WARNING "Running out of resources for /ht host !\n");
472 hose->mem_resources[cur].end = res->start - 1;
473 continue;
474 }
475 cur++;
476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
477 cur-1, res->start - 1, cur, res->end + 1);
478 hose->mem_resources[cur].name = np->full_name;
479 hose->mem_resources[cur].flags = IORESOURCE_MEM;
480 hose->mem_resources[cur].start = res->end + 1;
481 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
482 hose->mem_resources[cur-1].end = res->start - 1;
483 }
484}
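/* Worked example (illustrative): with the HT window initially covering
 * 0x80000000-0xefffffff and a single AGP resource at 0x90000000-0x9fffffff,
 * the loop above leaves HT with two resources, 0x80000000-0x8fffffff and
 * 0xa0000000-0xefffffff, the AGP range being the "hole" between them.
 */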
485
486static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
487 struct device_node *dev, int primary)
488{
489 static unsigned int static_lc_ranges[2024];
490 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
491 unsigned int size;
492 int rlen = 0, orig_rlen;
493 int memno = 0;
494 struct resource *res;
495 int np, na = prom_n_addr_cells(dev);
496
497 np = na + 5;
498
499 /* First we try to merge ranges to fix a problem with some pmacs
500 * that can have more than 3 ranges, fortunately using contiguous
501 * addresses -- BenH
502 */
503 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
504 if (!dt_ranges)
505 return;
506 /* lc_ranges = alloc_bootmem(rlen);*/
507 lc_ranges = static_lc_ranges;
508 if (!lc_ranges)
509 return; /* what can we do here ? */
510 memcpy(lc_ranges, dt_ranges, rlen);
511 orig_rlen = rlen;
512
513 /* Let's work on a copy of the "ranges" property instead of damaging
514 * the device-tree image in memory
515 */
516 ranges = lc_ranges;
517 prev = NULL;
518 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
519 if (prev) {
520 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
521 (prev[2] + prev[na+4]) == ranges[2] &&
522 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
523 prev[na+4] += ranges[na+4];
524 ranges[0] = 0;
525 ranges += np;
526 continue;
527 }
528 }
529 prev = ranges;
530 ranges += np;
531 }
532
533 /*
534 * The ranges property is laid out as an array of elements,
535 * each of which comprises:
536 * cells 0 - 2: a PCI address
537 * cells 3 or 3+4: a CPU physical address
538 * (size depending on dev->n_addr_cells)
539 * cells 4+5 or 5+6: the size of the range
540 */
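/* Worked example (illustrative): with na == 2, each entry is np == 7 cells,
 * e.g. { 0x02000000, 0, 0x80000000, 0, 0x80000000, 0, 0x70000000 }: cell 0
 * >> 24 == 2 marks memory space, cells 0-2 are the PCI address, cells 3-4
 * the CPU physical address and cells 5-6 the size -- so in this function
 * ranges[na+2] picks up the low CPU address cell and ranges[na+4] the low
 * size cell.
 */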
541 ranges = lc_ranges;
542 rlen = orig_rlen;
543 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
544 res = NULL;
545 size = ranges[na+4];
546 switch (ranges[0] >> 24) {
547 case 1: /* I/O space */
548 if (ranges[2] != 0)
549 break;
550 hose->io_base_phys = ranges[na+2];
551 /* limit I/O space to 16MB */
552 if (size > 0x01000000)
553 size = 0x01000000;
554 hose->io_base_virt = ioremap(ranges[na+2], size);
555 if (primary)
556 isa_io_base = (unsigned long) hose->io_base_virt;
557 res = &hose->io_resource;
558 res->flags = IORESOURCE_IO;
559 res->start = ranges[2];
560 break;
561 case 2: /* memory space */
562 memno = 0;
563 if (ranges[1] == 0 && ranges[2] == 0
564 && ranges[na+4] <= (16 << 20)) {
565 /* 1st 16MB, i.e. ISA memory area */
566#if 0
567 if (primary)
568 isa_mem_base = ranges[na+2];
569#endif
570 memno = 1;
571 }
572 while (memno < 3 && hose->mem_resources[memno].flags)
573 ++memno;
574 if (memno == 0)
575 hose->pci_mem_offset = ranges[na+2] - ranges[2];
576 if (memno < 3) {
577 res = &hose->mem_resources[memno];
578 res->flags = IORESOURCE_MEM;
579 res->start = ranges[na+2];
580 }
581 break;
582 }
583 if (res != NULL) {
584 res->name = dev->full_name;
585 res->end = res->start + size - 1;
586 res->parent = NULL;
587 res->sibling = NULL;
588 res->child = NULL;
589 }
590 ranges += np;
591 }
592}
593
594/*
595 * We assume that if we have a G3 powermac, we have one bridge called
596 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
597 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
598 */
599static int __init add_bridge(struct device_node *dev)
600{
601 int len;
602 struct pci_controller *hose;
603 char* disp_name;
604 int *bus_range;
605 int primary = 1;
606 struct property *of_prop;
607
608 DBG("Adding PCI host bridge %s\n", dev->full_name);
609
610 bus_range = (int *) get_property(dev, "bus-range", &len);
611 if (bus_range == NULL || len < 2 * sizeof(int)) {
612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
613 dev->full_name);
614 }
615
616 hose = alloc_bootmem(sizeof(struct pci_controller));
617 if (hose == NULL)
618 return -ENOMEM;
619 pci_setup_pci_controller(hose);
620
621 hose->arch_data = dev;
622 hose->first_busno = bus_range ? bus_range[0] : 0;
623 hose->last_busno = bus_range ? bus_range[1] : 0xff;
624
625 of_prop = alloc_bootmem(sizeof(struct property) +
626 sizeof(hose->global_number));
627 if (of_prop) {
628 memset(of_prop, 0, sizeof(struct property));
629 of_prop->name = "linux,pci-domain";
630 of_prop->length = sizeof(hose->global_number);
631 of_prop->value = (unsigned char *)&of_prop[1];
632 memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
633 prom_add_property(dev, of_prop);
634 }
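	/* The property added above mirrors hose->global_number into the
	 * device-tree, presumably so that each host bridge's OF node can
	 * later be matched back to its Linux PCI domain.
	 */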
635
636 disp_name = NULL;
637 if (device_is_compatible(dev, "u3-agp")) {
638 setup_u3_agp(hose);
639 disp_name = "U3-AGP";
640 primary = 0;
641 } else if (device_is_compatible(dev, "u3-ht")) {
642 setup_u3_ht(hose);
643 disp_name = "U3-HT";
644 primary = 1;
645 }
646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
647 disp_name, hose->first_busno, hose->last_busno);
648
649 /* Interpret the "ranges" property */
650 /* This also maps the I/O region and sets isa_io/mem_base */
651 pmac_process_bridge_OF_ranges(hose, dev, primary);
652
653 /* Fixup "bus-range" OF property */
654 fixup_bus_range(dev);
655
656 return 0;
657}
658
659/*
660 * We use our own read_irq_line here because PCI_INTERRUPT_PIN is
661 * crap on some of Apple ASICs. We unconditionally use the Open Firmware
662 * interrupt number as this is always right.
663 */
664static int pmac_pci_read_irq_line(struct pci_dev *pci_dev)
665{
666 struct device_node *node;
667
668 node = pci_device_to_OF_node(pci_dev);
669 if (node == NULL)
670 return -1;
671 if (node->n_intrs == 0)
672 return -1;
673 pci_dev->irq = node->intrs[0].line;
674 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
675
676 return 0;
677}
678
679void __init pmac_pcibios_fixup(void)
680{
681 struct pci_dev *dev = NULL;
682
683 for_each_pci_dev(dev)
684 pmac_pci_read_irq_line(dev);
685}
686
687static void __init pmac_fixup_phb_resources(void)
688{
689 struct pci_controller *hose, *tmp;
690
691 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
692 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
693 hose->io_resource.start += offset;
694 hose->io_resource.end += offset;
695 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
696 hose->global_number,
697 hose->io_resource.start, hose->io_resource.end);
698 }
699}
700
701void __init pmac_pci_init(void)
702{
703 struct device_node *np, *root;
704 struct device_node *ht = NULL;
705
706 /* Probe root PCI hosts, that is on U3 the AGP host and the
707	 * HyperTransport host. The latter is "kept" around and added
708	 * last, as its resource management relies on the AGP resources
709	 * having been set up first
710 */
711 root = of_find_node_by_path("/");
712 if (root == NULL) {
713		printk(KERN_CRIT "pmac_pci_init: can't find root of device tree\n");
714 return;
715 }
716 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
717 if (np->name == NULL)
718 continue;
719 if (strcmp(np->name, "pci") == 0) {
720 if (add_bridge(np) == 0)
721 of_node_get(np);
722 }
723 if (strcmp(np->name, "ht") == 0) {
724 of_node_get(np);
725 ht = np;
726 }
727 }
728 of_node_put(root);
729
730 /* Now setup the HyperTransport host if we found any
731 */
732 if (ht && add_bridge(ht) != 0)
733 of_node_put(ht);
734
735 /* Fixup the IO resources on our host bridges as the common code
736	 * does it only for children of the host bridges
737 */
738 pmac_fixup_phb_resources();
739
740 /* Setup the linkage between OF nodes and PHBs */
741 pci_devs_phb_init();
742
743 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
744	 * assume there is no P2P bridge on the AGP bus, which should
745	 * hopefully be a safe assumption.
746 */
747 if (u3_agp) {
748 struct device_node *np = u3_agp->arch_data;
749 PCI_DN(np)->busno = 0xf0;
750 for (np = np->child; np; np = np->sibling)
751 PCI_DN(np)->busno = 0xf0;
752 }
753
754 pmac_check_ht_link();
755
756	/* Tell pci.c to not use the common resource allocation mechanism */
757 pci_probe_only = 1;
758
759 /* Allow all IO */
760 io_page_mask = -1;
761}
762
763/*
764 * Disable second function on K2-SATA, it's broken
765 * and disable IO BARs on first one
766 */
767static void fixup_k2_sata(struct pci_dev* dev)
768{
769 int i;
770 u16 cmd;
771
772 if (PCI_FUNC(dev->devfn) > 0) {
773 pci_read_config_word(dev, PCI_COMMAND, &cmd);
774 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
775 pci_write_config_word(dev, PCI_COMMAND, cmd);
776 for (i = 0; i < 6; i++) {
777 dev->resource[i].start = dev->resource[i].end = 0;
778 dev->resource[i].flags = 0;
779 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
780 }
781 } else {
782 pci_read_config_word(dev, PCI_COMMAND, &cmd);
783 cmd &= ~PCI_COMMAND_IO;
784 pci_write_config_word(dev, PCI_COMMAND, cmd);
785 for (i = 0; i < 5; i++) {
786 dev->resource[i].start = dev->resource[i].end = 0;
787 dev->resource[i].flags = 0;
788 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
789 }
790 }
791}
792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
793
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
deleted file mode 100644
index fa8121d53b89..000000000000
--- a/arch/ppc64/kernel/pmac_setup.c
+++ /dev/null
@@ -1,525 +0,0 @@
1/*
2 * arch/ppc64/kernel/pmac_setup.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#undef DEBUG
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/errno.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/stddef.h>
35#include <linux/unistd.h>
36#include <linux/ptrace.h>
37#include <linux/slab.h>
38#include <linux/user.h>
39#include <linux/a.out.h>
40#include <linux/tty.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/ioport.h>
44#include <linux/major.h>
45#include <linux/initrd.h>
46#include <linux/vt_kern.h>
47#include <linux/console.h>
48#include <linux/ide.h>
49#include <linux/pci.h>
50#include <linux/adb.h>
51#include <linux/cuda.h>
52#include <linux/pmu.h>
53#include <linux/irq.h>
54#include <linux/seq_file.h>
55#include <linux/root_dev.h>
56#include <linux/bitops.h>
57
58#include <asm/processor.h>
59#include <asm/sections.h>
60#include <asm/prom.h>
61#include <asm/system.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/iommu.h>
65#include <asm/machdep.h>
66#include <asm/dma.h>
67#include <asm/btext.h>
68#include <asm/cputable.h>
69#include <asm/pmac_feature.h>
70#include <asm/time.h>
71#include <asm/of_device.h>
72#include <asm/lmb.h>
73#include <asm/smu.h>
74#include <asm/pmc.h>
75
76#include "pmac.h"
77#include "mpic.h"
78
79#ifdef DEBUG
80#define DBG(fmt...) udbg_printf(fmt)
81#else
82#define DBG(fmt...)
83#endif
84
85static int current_root_goodness = -1;
86#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
87
88extern int powersave_nap;
89int sccdbg;
90
91sys_ctrler_t sys_ctrler;
92EXPORT_SYMBOL(sys_ctrler);
93
94#ifdef CONFIG_PMAC_SMU
95unsigned long smu_cmdbuf_abs;
96EXPORT_SYMBOL(smu_cmdbuf_abs);
97#endif
98
99extern void udbg_init_scc(struct device_node *np);
100
101static void __pmac pmac_show_cpuinfo(struct seq_file *m)
102{
103 struct device_node *np;
104 char *pp;
105 int plen;
106 char* mbname;
107 int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
108 PMAC_MB_INFO_MODEL, 0);
109 unsigned int mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
110 PMAC_MB_INFO_FLAGS, 0);
111
112 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
113 (long)&mbname) != 0)
114 mbname = "Unknown";
115
116 /* find motherboard type */
117 seq_printf(m, "machine\t\t: ");
118 np = of_find_node_by_path("/");
119 if (np != NULL) {
120 pp = (char *) get_property(np, "model", NULL);
121 if (pp != NULL)
122 seq_printf(m, "%s\n", pp);
123 else
124 seq_printf(m, "PowerMac\n");
125 pp = (char *) get_property(np, "compatible", &plen);
126 if (pp != NULL) {
127 seq_printf(m, "motherboard\t:");
128 while (plen > 0) {
129 int l = strlen(pp) + 1;
130 seq_printf(m, " %s", pp);
131 plen -= l;
132 pp += l;
133 }
134 seq_printf(m, "\n");
135 }
136 of_node_put(np);
137 } else
138 seq_printf(m, "PowerMac\n");
139
140 /* print parsed model */
141 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
142 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
143
144 /* Indicate newworld */
145 seq_printf(m, "pmac-generation\t: NewWorld\n");
146}
147
148
149static void __init pmac_setup_arch(void)
150{
151 /* init to some ~sane value until calibrate_delay() runs */
152 loops_per_jiffy = 50000000;
153
154 /* Probe motherboard chipset */
155 pmac_feature_init();
156#if 0
157 /* Lock-enable the SCC channel used for debug */
158 if (sccdbg) {
159 np = of_find_node_by_name(NULL, "escc");
160 if (np)
161 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
162 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
163 }
164#endif
165 /* We can NAP */
166 powersave_nap = 1;
167
168#ifdef CONFIG_ADB_PMU
169 /* Initialize the PMU if any */
170 find_via_pmu();
171#endif
172#ifdef CONFIG_PMAC_SMU
173 /* Initialize the SMU if any */
174 smu_init();
175#endif
176
177 /* Init NVRAM access */
178 pmac_nvram_init();
179
180 /* Setup SMP callback */
181#ifdef CONFIG_SMP
182 pmac_setup_smp();
183#endif
184
185 /* Lookup PCI hosts */
186 pmac_pci_init();
187
188#ifdef CONFIG_DUMMY_CONSOLE
189 conswitchp = &dummy_con;
190#endif
191
192 printk(KERN_INFO "Using native/NAP idle loop\n");
193}
194
195#ifdef CONFIG_SCSI
196void note_scsi_host(struct device_node *node, void *host)
197{
198 /* Obsolete */
199}
200#endif
201
202
203static int initializing = 1;
204
205static int pmac_late_init(void)
206{
207 initializing = 0;
208 return 0;
209}
210
211late_initcall(pmac_late_init);
212
213/* can't be __init - can be called whenever a disk is first accessed */
214void __pmac note_bootable_part(dev_t dev, int part, int goodness)
215{
216 extern dev_t boot_dev;
217 char *p;
218
219 if (!initializing)
220 return;
221 if ((goodness <= current_root_goodness) &&
222 ROOT_DEV != DEFAULT_ROOT_DEVICE)
223 return;
224 p = strstr(saved_command_line, "root=");
225 if (p != NULL && (p == saved_command_line || p[-1] == ' '))
226 return;
227
228 if (!boot_dev || dev == boot_dev) {
229 ROOT_DEV = dev + part;
230 boot_dev = 0;
231 current_root_goodness = goodness;
232 }
233}
234
235static void __pmac pmac_restart(char *cmd)
236{
237 switch(sys_ctrler) {
238#ifdef CONFIG_ADB_PMU
239 case SYS_CTRLER_PMU:
240 pmu_restart();
241 break;
242#endif
243
244#ifdef CONFIG_PMAC_SMU
245 case SYS_CTRLER_SMU:
246 smu_restart();
247 break;
248#endif
249 default:
250 ;
251 }
252}
253
254static void __pmac pmac_power_off(void)
255{
256 switch(sys_ctrler) {
257#ifdef CONFIG_ADB_PMU
258 case SYS_CTRLER_PMU:
259 pmu_shutdown();
260 break;
261#endif
262#ifdef CONFIG_PMAC_SMU
263 case SYS_CTRLER_SMU:
264 smu_shutdown();
265 break;
266#endif
267 default:
268 ;
269 }
270}
271
272static void __pmac pmac_halt(void)
273{
274 pmac_power_off();
275}
276
277#ifdef CONFIG_BOOTX_TEXT
278static void btext_putc(unsigned char c)
279{
280 btext_drawchar(c);
281}
282
283static void __init init_boot_display(void)
284{
285 char *name;
286 struct device_node *np = NULL;
287 int rc = -ENODEV;
288
289 printk("trying to initialize btext ...\n");
290
291 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
292 if (name != NULL) {
293 np = of_find_node_by_path(name);
294 if (np != NULL) {
295 if (strcmp(np->type, "display") != 0) {
296 printk("boot stdout isn't a display !\n");
297 of_node_put(np);
298 np = NULL;
299 }
300 }
301 }
302 if (np)
303 rc = btext_initialize(np);
304 if (rc == 0)
305 return;
306
307 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
308 if (get_property(np, "linux,opened", NULL)) {
309 printk("trying %s ...\n", np->full_name);
310 rc = btext_initialize(np);
311 printk("result: %d\n", rc);
312 }
313 if (rc == 0)
314 return;
315 }
316}
317#endif /* CONFIG_BOOTX_TEXT */
318
319/*
320 * Early initialization.
321 */
322static void __init pmac_init_early(void)
323{
324 DBG(" -> pmac_init_early\n");
325
326 /* Initialize hash table, from now on, we can take hash faults
327 * and call ioremap
328 */
329 hpte_init_native();
330
331 /* Init SCC */
332 if (strstr(cmd_line, "sccdbg")) {
333 sccdbg = 1;
334 udbg_init_scc(NULL);
335 }
336#ifdef CONFIG_BOOTX_TEXT
337 else {
338 init_boot_display();
339
340 udbg_putc = btext_putc;
341 }
342#endif /* CONFIG_BOOTX_TEXT */
343
344 /* Setup interrupt mapping options */
345 ppc64_interrupt_controller = IC_OPEN_PIC;
346
347 iommu_init_early_u3();
348
349 DBG(" <- pmac_init_early\n");
350}
351
352static int pmac_u3_cascade(struct pt_regs *regs, void *data)
353{
354 return mpic_get_one_irq((struct mpic *)data, regs);
355}
356
357static __init void pmac_init_IRQ(void)
358{
359 struct device_node *irqctrler = NULL;
360 struct device_node *irqctrler2 = NULL;
361 struct device_node *np = NULL;
362 struct mpic *mpic1, *mpic2;
363
364 /* We first try to detect Apple's new Core99 chipset, since mac-io
365 * is quite different on those machines and contains an IBM MPIC2.
366 */
367 while ((np = of_find_node_by_type(np, "open-pic")) != NULL) {
368 struct device_node *parent = of_get_parent(np);
369 if (parent && !strcmp(parent->name, "u3"))
370 irqctrler2 = of_node_get(np);
371 else
372 irqctrler = of_node_get(np);
373 of_node_put(parent);
374 }
375 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
376 unsigned char senses[128];
377
378 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
379 (unsigned int)irqctrler->addrs[0].address);
380
381 prom_get_irq_senses(senses, 0, 128);
382 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
383 MPIC_PRIMARY | MPIC_WANTS_RESET,
384 0, 0, 128, 256, senses, 128, " K2-MPIC ");
385 BUG_ON(mpic1 == NULL);
386 mpic_init(mpic1);
387
388 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
389 irqctrler2->n_addrs > 0) {
390 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
391 (u32)irqctrler2->addrs[0].address,
392 irqctrler2->intrs[0].line);
393
394 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
395 prom_get_irq_senses(senses, 128, 128 + 128);
396
397 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
398 * hypertransport interrupts routed to it
399 */
400 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
401 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
402 0, 128, 128, 0, senses, 128, " U3-MPIC ");
403 BUG_ON(mpic2 == NULL);
404 mpic_init(mpic2);
405 mpic_setup_cascade(irqctrler2->intrs[0].line,
406 pmac_u3_cascade, mpic2);
407 }
408 }
409 of_node_put(irqctrler);
410 of_node_put(irqctrler2);
411}
412
413static void __init pmac_progress(char *s, unsigned short hex)
414{
415 if (sccdbg) {
416 udbg_puts(s);
417 udbg_puts("\n");
418 }
419#ifdef CONFIG_BOOTX_TEXT
420 else if (boot_text_mapped) {
421 btext_drawstring(s);
422 btext_drawstring("\n");
423 }
424#endif /* CONFIG_BOOTX_TEXT */
425}
426
427/*
428 * pmac has no legacy IO, anything calling this function has to
429 * fail or bad things will happen
430 */
431static int pmac_check_legacy_ioport(unsigned int baseport)
432{
433 return -ENODEV;
434}
435
436static int __init pmac_declare_of_platform_devices(void)
437{
438 struct device_node *np, *npp;
439
440 npp = of_find_node_by_name(NULL, "u3");
441 if (npp) {
442 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
443 if (strncmp(np->name, "i2c", 3) == 0) {
444 of_platform_device_create(np, "u3-i2c", NULL);
445 of_node_put(np);
446 break;
447 }
448 }
449 of_node_put(npp);
450 }
451 npp = of_find_node_by_type(NULL, "smu");
452 if (npp) {
453 of_platform_device_create(npp, "smu", NULL);
454 of_node_put(npp);
455 }
456
457 return 0;
458}
459
460device_initcall(pmac_declare_of_platform_devices);
461
462/*
463 * Called very early, MMU is off, device-tree isn't unflattened
464 */
465static int __init pmac_probe(int platform)
466{
467 if (platform != PLATFORM_POWERMAC)
468 return 0;
469 /*
470 * On U3, the DART (iommu) must be allocated now since it
471 * has an impact on htab_initialize (due to the large page it
472 * occupies having to be broken up so the DART itself is not
473	 * part of the cacheable linear mapping)
474 */
475 alloc_u3_dart_table();
476
477#ifdef CONFIG_PMAC_SMU
478 /*
479	 * SMU-based G5s need some memory below 2GB; at least the current
480 * driver needs that. We have to allocate it now. We allocate 4k
481 * (1 small page) for now.
482 */
483 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
484#endif /* CONFIG_PMAC_SMU */
485
486 return 1;
487}
488
489static int pmac_probe_mode(struct pci_bus *bus)
490{
491 struct device_node *node = bus->sysdata;
492
493 /* We need to use normal PCI probing for the AGP bus,
494 since the device for the AGP bridge isn't in the tree. */
495 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
496 return PCI_PROBE_NORMAL;
497
498 return PCI_PROBE_DEVTREE;
499}
500
501struct machdep_calls __initdata pmac_md = {
502#ifdef CONFIG_HOTPLUG_CPU
503 .cpu_die = generic_mach_cpu_die,
504#endif
505 .probe = pmac_probe,
506 .setup_arch = pmac_setup_arch,
507 .init_early = pmac_init_early,
508 .get_cpuinfo = pmac_show_cpuinfo,
509 .init_IRQ = pmac_init_IRQ,
510 .get_irq = mpic_get_irq,
511 .pcibios_fixup = pmac_pcibios_fixup,
512 .pci_probe_mode = pmac_probe_mode,
513 .restart = pmac_restart,
514 .power_off = pmac_power_off,
515 .halt = pmac_halt,
516 .get_boot_time = pmac_get_boot_time,
517 .set_rtc_time = pmac_set_rtc_time,
518 .get_rtc_time = pmac_get_rtc_time,
519 .calibrate_decr = pmac_calibrate_decr,
520 .feature_call = pmac_do_feature_call,
521 .progress = pmac_progress,
522 .check_legacy_ioport = pmac_check_legacy_ioport,
523 .idle_loop = native_idle,
524 .enable_pmcs = power4_enable_pmcs,
525};
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
deleted file mode 100644
index a23de37227bf..000000000000
--- a/arch/ppc64/kernel/pmac_smp.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of the
8 * Apple/DayStar 2-CPU board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24
25#undef DEBUG
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/smp.h>
31#include <linux/smp_lock.h>
32#include <linux/interrupt.h>
33#include <linux/kernel_stat.h>
34#include <linux/init.h>
35#include <linux/spinlock.h>
36#include <linux/errno.h>
37#include <linux/irq.h>
38
39#include <asm/ptrace.h>
40#include <asm/atomic.h>
41#include <asm/irq.h>
42#include <asm/page.h>
43#include <asm/pgtable.h>
44#include <asm/sections.h>
45#include <asm/io.h>
46#include <asm/prom.h>
47#include <asm/smp.h>
48#include <asm/machdep.h>
49#include <asm/pmac_feature.h>
50#include <asm/time.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#include "mpic.h"
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void pmac_secondary_start_1(void);
64extern void pmac_secondary_start_2(void);
65extern void pmac_secondary_start_3(void);
66
67extern struct smp_ops_t *smp_ops;
68
69static void (*pmac_tb_freeze)(int freeze);
70static struct device_node *pmac_tb_clock_chip_host;
71static u8 pmac_tb_pulsar_addr;
72static DEFINE_SPINLOCK(timebase_lock);
73static unsigned long timebase;
74
75static void smp_core99_cypress_tb_freeze(int freeze)
76{
77 u8 data;
78 int rc;
79
80	/* Strangely, the device-tree says the address is 0xd2, but Darwin
81 * accesses 0xd0 ...
82 */
83 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
84 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
85 0xd0 | pmac_low_i2c_read,
86 0x81, &data, 1);
87 if (rc != 0)
88 goto bail;
89
90 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
91
92 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
93 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
94 0xd0 | pmac_low_i2c_write,
95 0x81, &data, 1);
96
97 bail:
98 if (rc != 0) {
99 printk("Cypress Timebase %s rc: %d\n",
100 freeze ? "freeze" : "unfreeze", rc);
101 panic("Timebase freeze failed !\n");
102 }
103}
104
105static void smp_core99_pulsar_tb_freeze(int freeze)
106{
107 u8 data;
108 int rc;
109
110 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
111 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
112 pmac_tb_pulsar_addr | pmac_low_i2c_read,
113 0x2e, &data, 1);
114 if (rc != 0)
115 goto bail;
116
117 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
118
119 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
120 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
121 pmac_tb_pulsar_addr | pmac_low_i2c_write,
122 0x2e, &data, 1);
123 bail:
124 if (rc != 0) {
125 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
126 freeze ? "freeze" : "unfreeze", rc);
127 panic("Timebase freeze failed !\n");
128 }
129}
130
131
132static void smp_core99_give_timebase(void)
133{
134 /* Open i2c bus for synchronous access */
135 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
136 panic("Can't open i2c for TB sync !\n");
137
138 spin_lock(&timebase_lock);
139 (*pmac_tb_freeze)(1);
140 mb();
141 timebase = get_tb();
142 spin_unlock(&timebase_lock);
143
144 while (timebase)
145 barrier();
146
147 spin_lock(&timebase_lock);
148 (*pmac_tb_freeze)(0);
149 spin_unlock(&timebase_lock);
150
151 /* Close i2c bus */
152 pmac_low_i2c_close(pmac_tb_clock_chip_host);
153}
154
155
156static void __devinit smp_core99_take_timebase(void)
157{
158 while (!timebase)
159 barrier();
160 spin_lock(&timebase_lock);
161 set_tb(timebase >> 32, timebase & 0xffffffff);
162 timebase = 0;
163 spin_unlock(&timebase_lock);
164}
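/* A summary of the handshake implemented by the two functions above: the
 * master freezes the timebase over i2c, samples it into the shared
 * `timebase' variable, and spins until it reads back zero; the slave spins
 * until `timebase' goes non-zero, loads it with set_tb() and zeroes it,
 * releasing the master to unfreeze the clocks. The spinlock only orders
 * the TB/i2c accesses -- the rendezvous itself is the zero/non-zero
 * transition of `timebase'.
 */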
165
166
167static int __init smp_core99_probe(void)
168{
169 struct device_node *cpus;
170 struct device_node *cc;
171 int ncpus = 0;
172
173 /* Maybe use systemconfiguration here ? */
174 if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
175
176 /* Count CPUs in the device-tree */
177 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
178 ++ncpus;
179
180 printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
181
182 /* Nothing more to do if less than 2 of them */
183 if (ncpus <= 1)
184 return 1;
185
186 /* HW sync only on these platforms */
187 if (!machine_is_compatible("PowerMac7,2") &&
188 !machine_is_compatible("PowerMac7,3") &&
189 !machine_is_compatible("RackMac3,1"))
190 goto nohwsync;
191
192 /* Look for the clock chip */
193 for (cc = NULL; (cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL;) {
194 struct device_node *p = of_get_parent(cc);
195 u32 *reg;
196 int ok;
197 ok = p && device_is_compatible(p, "uni-n-i2c");
198 if (!ok)
199 goto next;
200 reg = (u32 *)get_property(cc, "reg", NULL);
201 if (reg == NULL)
202 goto next;
203 switch (*reg) {
204 case 0xd2:
205 if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
206 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
207 pmac_tb_pulsar_addr = 0xd2;
208 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
209 } else if (device_is_compatible(cc, "cy28508")) {
210 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
211 printk(KERN_INFO "Timebase clock is Cypress chip\n");
212 }
213 break;
214 case 0xd4:
215 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
216 pmac_tb_pulsar_addr = 0xd4;
217 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
218 break;
219 }
220 if (pmac_tb_freeze != NULL) {
221 pmac_tb_clock_chip_host = p;
222 smp_ops->give_timebase = smp_core99_give_timebase;
223 smp_ops->take_timebase = smp_core99_take_timebase;
224 of_node_put(cc);
225 of_node_put(p);
226 break;
227 }
228 next:
229 of_node_put(p);
230 }
231
232 nohwsync:
233 mpic_request_ipis();
234
235 return ncpus;
236}
237
238static void __init smp_core99_kick_cpu(int nr)
239{
240 int save_vector, j;
241 unsigned long new_vector;
242 unsigned long flags;
243 volatile unsigned int *vector
244 = ((volatile unsigned int *)(KERNELBASE+0x100));
245
246 if (nr < 1 || nr > 3)
247 return;
248 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
249
250 local_irq_save(flags);
251 local_irq_disable();
252
253 /* Save reset vector */
254 save_vector = *vector;
255
256 /* Setup fake reset vector that does
257 * b .pmac_secondary_start - KERNELBASE
258 */
259 switch(nr) {
260 case 1:
261 new_vector = (unsigned long)pmac_secondary_start_1;
262 break;
263 case 2:
264 new_vector = (unsigned long)pmac_secondary_start_2;
265 break;
266 case 3:
267 default:
268 new_vector = (unsigned long)pmac_secondary_start_3;
269 break;
270 }
271 *vector = 0x48000002 + (new_vector - KERNELBASE);
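	/* 0x48000002 is a PowerPC "ba" (branch absolute) instruction:
	 * primary opcode 18 (0x48000000) with the AA bit (0x2) set, so
	 * the LI field -- here the low address of pmac_secondary_start_N
	 * -- is an absolute branch target. 0x100 is the system reset
	 * vector, which the soft-reset issued below makes the CPU take.
	 */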
272
273 /* flush data cache and inval instruction cache */
274 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
275
276 /* Put some life in our friend */
277 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
278 paca[nr].cpu_start = 1;
279
280 /* FIXME: We wait a bit for the CPU to take the exception, I should
281 * instead wait for the entry code to set something for me. Well,
282 * ideally, all that crap will be done in prom.c and the CPU left
283 * in a RAM-based wait loop like CHRP.
284 */
285 for (j = 1; j < 1000000; j++)
286 mb();
287
288 /* Restore our exception vector */
289 *vector = save_vector;
290 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
291
292 local_irq_restore(flags);
293 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
294}
295
296static void __init smp_core99_setup_cpu(int cpu_nr)
297{
298 /* Setup MPIC */
299 mpic_setup_this_cpu();
300
301 if (cpu_nr == 0) {
302 extern void g5_phy_disable_cpu1(void);
303
304 /* If we didn't start the second CPU, we must take
305 * it off the bus
306 */
307 if (num_online_cpus() < 2)
308 g5_phy_disable_cpu1();
309 if (ppc_md.progress) ppc_md.progress("smp_core99_setup_cpu 0 done", 0x349);
310 }
311}
312
313struct smp_ops_t core99_smp_ops __pmacdata = {
314 .message_pass = smp_mpic_message_pass,
315 .probe = smp_core99_probe,
316 .kick_cpu = smp_core99_kick_cpu,
317 .setup_cpu = smp_core99_setup_cpu,
318 .give_timebase = smp_generic_give_timebase,
319 .take_timebase = smp_generic_take_timebase,
320};
321
322void __init pmac_setup_smp(void)
323{
324 smp_ops = &core99_smp_ops;
325#ifdef CONFIG_HOTPLUG_CPU
326 smp_ops->cpu_enable = generic_cpu_enable;
327 smp_ops->cpu_disable = generic_cpu_disable;
328 smp_ops->cpu_die = generic_cpu_die;
329#endif
330}
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
deleted file mode 100644
index 41bbb8c59697..000000000000
--- a/arch/ppc64/kernel/pmac_time.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/pmu.h>
23#include <linux/interrupt.h>
24
25#include <asm/sections.h>
26#include <asm/prom.h>
27#include <asm/system.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/machdep.h>
31#include <asm/time.h>
32#include <asm/nvram.h>
33#include <asm/smu.h>
34
35#undef DEBUG
36
37#ifdef DEBUG
38#define DBG(x...) printk(x)
39#else
40#define DBG(x...)
41#endif
42
43/* Apparently the RTC stores seconds since 1 Jan 1904 */
44#define RTC_OFFSET 2082844800
45
46/*
47 * Calibrate the decrementer frequency with the VIA timer 1.
48 */
49#define VIA_TIMER_FREQ_6	4700000	/* timer 1 frequency * 6 */
50
51extern struct timezone sys_tz;
52extern void to_tm(int tim, struct rtc_time * tm);
53
54void __pmac pmac_get_rtc_time(struct rtc_time *tm)
55{
56 switch(sys_ctrler) {
57#ifdef CONFIG_ADB_PMU
58 case SYS_CTRLER_PMU: {
59 /* TODO: Move that to a function in the PMU driver */
60 struct adb_request req;
61 unsigned int now;
62
63 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
64 return;
65 pmu_wait_complete(&req);
66 if (req.reply_len != 4)
67 printk(KERN_ERR "pmac_get_rtc_time: PMU returned a %d"
68			       "-byte reply\n", req.reply_len);
69 now = (req.reply[0] << 24) + (req.reply[1] << 16)
70 + (req.reply[2] << 8) + req.reply[3];
71 DBG("get: %u -> %u\n", (int)now, (int)(now - RTC_OFFSET));
72 now -= RTC_OFFSET;
73
74 to_tm(now, tm);
75 tm->tm_year -= 1900;
76 tm->tm_mon -= 1;
77
78 DBG("-> tm_mday: %d, tm_mon: %d, tm_year: %d, %d:%02d:%02d\n",
79 tm->tm_mday, tm->tm_mon, tm->tm_year,
80 tm->tm_hour, tm->tm_min, tm->tm_sec);
81 break;
82 }
83#endif /* CONFIG_ADB_PMU */
84
85#ifdef CONFIG_PMAC_SMU
86 case SYS_CTRLER_SMU:
87 smu_get_rtc_time(tm, 1);
88 break;
89#endif /* CONFIG_PMAC_SMU */
90 default:
91 ;
92 }
93}
94
95int __pmac pmac_set_rtc_time(struct rtc_time *tm)
96{
97 switch(sys_ctrler) {
98#ifdef CONFIG_ADB_PMU
99 case SYS_CTRLER_PMU: {
100 /* TODO: Move that to a function in the PMU driver */
101 struct adb_request req;
102 unsigned int nowtime;
103
104 DBG("set: tm_mday: %d, tm_mon: %d, tm_year: %d,"
105 " %d:%02d:%02d\n",
106 tm->tm_mday, tm->tm_mon, tm->tm_year,
107 tm->tm_hour, tm->tm_min, tm->tm_sec);
108
109 nowtime = mktime(tm->tm_year + 1900, tm->tm_mon + 1,
110 tm->tm_mday, tm->tm_hour, tm->tm_min,
111 tm->tm_sec);
112
113 DBG("-> %u -> %u\n", (int)nowtime,
114 (int)(nowtime + RTC_OFFSET));
115 nowtime += RTC_OFFSET;
116
117 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
118 nowtime >> 24, nowtime >> 16,
119 nowtime >> 8, nowtime) < 0)
120 return -ENXIO;
121 pmu_wait_complete(&req);
122 if (req.reply_len != 0)
123 printk(KERN_ERR "pmac_set_rtc_time: PMU returned a %d"
124			       "-byte reply\n", req.reply_len);
125 return 0;
126 }
127#endif /* CONFIG_ADB_PMU */
128
129#ifdef CONFIG_PMAC_SMU
130 case SYS_CTRLER_SMU:
131 return smu_set_rtc_time(tm, 1);
132#endif /* CONFIG_PMAC_SMU */
133 default:
134 return -ENODEV;
135 }
136}
137
138void __init pmac_get_boot_time(struct rtc_time *tm)
139{
140 pmac_get_rtc_time(tm);
141
142#ifdef disabled__CONFIG_NVRAM
143 s32 delta = 0;
144 int dst;
145
146 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
147 delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
148 delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
149 if (delta & 0x00800000UL)
150 delta |= 0xFF000000UL;
151 dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
152 printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
153 dst ? "on" : "off");
154#endif
155}
156
157/*
158 * Query the OF and get the decr frequency.
159 * FIXME: merge this with generic_calibrate_decr
160 */
161void __init pmac_calibrate_decr(void)
162{
163 struct device_node *cpu;
164 unsigned int freq, *fp;
165 struct div_result divres;
166
167 /*
168 * The cpu node should have a timebase-frequency property
169 * to tell us the rate at which the decrementer counts.
170 */
171 cpu = find_type_devices("cpu");
172 if (cpu == 0)
173 panic("can't find cpu node in time_init");
174 fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
175 if (fp == 0)
176 panic("can't get cpu timebase frequency");
177 freq = *fp;
178 printk("time_init: decrementer frequency = %u.%.6u MHz\n",
179 freq/1000000, freq%1000000);
180 tb_ticks_per_jiffy = freq / HZ;
181 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
182 tb_ticks_per_usec = freq / 1000000;
183 tb_to_us = mulhwu_scale_factor(freq, 1000000);
184 div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
185 tb_to_xs = divres.result_low;
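	/* tb_to_xs is a fixed-point scale factor: the 128-bit divide
	 * computes (2^20 * 2^64) / tb_ticks_per_sec, so that
	 * xsec = (tb * tb_to_xs) >> 64, where one "xsec" is 2^-20 of a
	 * second -- hence the 1024*1024 dividend.
	 */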
186 ppc_tb_freq = freq;
187
188 fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
189 if (fp == 0)
190 panic("can't get cpu processor frequency");
191 ppc_proc_freq = *fp;
192
193 setup_default_decr();
194}
195
diff --git a/arch/ppc64/kernel/pmc.c b/arch/ppc64/kernel/pmc.c
deleted file mode 100644
index 63d9481c3ec2..000000000000
--- a/arch/ppc64/kernel/pmc.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/pmc.c
3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/spinlock.h>
15#include <linux/module.h>
16
17#include <asm/processor.h>
18#include <asm/pmc.h>
19
20/* Ensure performance monitor exceptions are disabled */
21static void dummy_perf(struct pt_regs *regs)
22{
23 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
24
25 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
26 mtspr(SPRN_MMCR0, mmcr0);
27}
28
29static DEFINE_SPINLOCK(pmc_owner_lock);
30static void *pmc_owner_caller; /* mostly for debugging */
31perf_irq_t perf_irq = dummy_perf;
32
33int reserve_pmc_hardware(perf_irq_t new_perf_irq)
34{
35 int err = 0;
36
37 spin_lock(&pmc_owner_lock);
38
39 if (pmc_owner_caller) {
40 printk(KERN_WARNING "reserve_pmc_hardware: "
41 "PMC hardware busy (reserved by caller %p)\n",
42 pmc_owner_caller);
43 err = -EBUSY;
44 goto out;
45 }
46
47 pmc_owner_caller = __builtin_return_address(0);
48 perf_irq = new_perf_irq ? : dummy_perf;
49
50 out:
51 spin_unlock(&pmc_owner_lock);
52 return err;
53}
54EXPORT_SYMBOL_GPL(reserve_pmc_hardware);
55
56void release_pmc_hardware(void)
57{
58 spin_lock(&pmc_owner_lock);
59
60 WARN_ON(! pmc_owner_caller);
61
62 pmc_owner_caller = NULL;
63 perf_irq = dummy_perf;
64
65 spin_unlock(&pmc_owner_lock);
66}
67EXPORT_SYMBOL_GPL(release_pmc_hardware);
68
69void power4_enable_pmcs(void)
70{
71 unsigned long hid0;
72
73 hid0 = mfspr(HID0);
74 hid0 |= 1UL << (63 - 20);
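	/* HID0 bits are numbered IBM-style from the most significant bit,
	 * hence the 63 - 20 shift: this sets HID0 bit 20, the bit that
	 * enables the performance monitor counters on POWER4.
	 */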
75
76 /* POWER4 requires the following sequence */
77 asm volatile(
78 "sync\n"
79 "mtspr %1, %0\n"
80 "mfspr %0, %1\n"
81 "mfspr %0, %1\n"
82 "mfspr %0, %1\n"
83 "mfspr %0, %1\n"
84 "mfspr %0, %1\n"
85 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
87 "memory");
88}
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f4eec6..84006e26342c 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
19#include <asm/hw_irq.h> 19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h> 20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/iSeries/HvCallSc.h>
23 22
24EXPORT_SYMBOL(strcpy); 23EXPORT_SYMBOL(strcpy);
25EXPORT_SYMBOL(strncpy); 24EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
46 45
47EXPORT_SYMBOL(reloc_offset); 46EXPORT_SYMBOL(reloc_offset);
48 47
49#ifdef CONFIG_PPC_ISERIES
50EXPORT_SYMBOL(HvCall0);
51EXPORT_SYMBOL(HvCall1);
52EXPORT_SYMBOL(HvCall2);
53EXPORT_SYMBOL(HvCall3);
54EXPORT_SYMBOL(HvCall4);
55EXPORT_SYMBOL(HvCall5);
56EXPORT_SYMBOL(HvCall6);
57EXPORT_SYMBOL(HvCall7);
58#endif
59
60EXPORT_SYMBOL(_insb); 48EXPORT_SYMBOL(_insb);
61EXPORT_SYMBOL(_outsb); 49EXPORT_SYMBOL(_outsb);
62EXPORT_SYMBOL(_insw); 50EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
77EXPORT_SYMBOL(__flush_icache_range); 65EXPORT_SYMBOL(__flush_icache_range);
78EXPORT_SYMBOL(flush_dcache_range); 66EXPORT_SYMBOL(flush_dcache_range);
79 67
80#ifdef CONFIG_SMP
81#ifdef CONFIG_PPC_ISERIES
82EXPORT_SYMBOL(local_get_flags);
83EXPORT_SYMBOL(local_irq_disable);
84EXPORT_SYMBOL(local_irq_restore);
85#endif
86#endif
87
88EXPORT_SYMBOL(memcpy); 68EXPORT_SYMBOL(memcpy);
89EXPORT_SYMBOL(memset); 69EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memmove); 70EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
deleted file mode 100644
index 887005358eb1..000000000000
--- a/arch/ppc64/kernel/process.c
+++ /dev/null
@@ -1,713 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/process.c
3 *
4 * Derived from "arch/i386/kernel/process.c"
5 * Copyright (C) 1995 Linus Torvalds
6 *
7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
8 * Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * PowerPC version
11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/errno.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/smp.h>
26#include <linux/smp_lock.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/slab.h>
30#include <linux/user.h>
31#include <linux/elf.h>
32#include <linux/init.h>
33#include <linux/init_task.h>
34#include <linux/prctl.h>
35#include <linux/ptrace.h>
36#include <linux/kallsyms.h>
37#include <linux/interrupt.h>
38#include <linux/utsname.h>
39#include <linux/kprobes.h>
40
41#include <asm/pgtable.h>
42#include <asm/uaccess.h>
43#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/processor.h>
46#include <asm/mmu.h>
47#include <asm/mmu_context.h>
48#include <asm/prom.h>
49#include <asm/ppcdebug.h>
50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h>
53#include <asm/firmware.h>
54#include <asm/sections.h>
55#include <asm/tlbflush.h>
56#include <asm/time.h>
57#include <asm/plpar_wrappers.h>
58
59#ifndef CONFIG_SMP
60struct task_struct *last_task_used_math = NULL;
61struct task_struct *last_task_used_altivec = NULL;
62#endif
63
64/*
65 * Make sure the floating-point register state in the
66 * thread_struct is up to date for task tsk.
67 */
68void flush_fp_to_thread(struct task_struct *tsk)
69{
70 if (tsk->thread.regs) {
71 /*
72 * We need to disable preemption here because if we didn't,
73 * another process could get scheduled after the regs->msr
74 * test but before we have finished saving the FP registers
75 * to the thread_struct. That process could take over the
76 * FPU, and then when we get scheduled again we would store
77 * bogus values for the remaining FP registers.
78 */
79 preempt_disable();
80 if (tsk->thread.regs->msr & MSR_FP) {
81#ifdef CONFIG_SMP
82 /*
83 * This should only ever be called for current or
84 * for a stopped child process. Since we save away
85 * the FP register state on context switch on SMP,
86 * there is something wrong if a stopped child appears
87 * to still have its FP state in the CPU registers.
88 */
89 BUG_ON(tsk != current);
90#endif
91 giveup_fpu(current);
92 }
93 preempt_enable();
94 }
95}
96
97void enable_kernel_fp(void)
98{
99 WARN_ON(preemptible());
100
101#ifdef CONFIG_SMP
102 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
103 giveup_fpu(current);
104 else
105 giveup_fpu(NULL); /* just enables FP for kernel */
106#else
107 giveup_fpu(last_task_used_math);
108#endif /* CONFIG_SMP */
109}
110EXPORT_SYMBOL(enable_kernel_fp);
111
112int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
113{
114 if (!tsk->thread.regs)
115 return 0;
116 flush_fp_to_thread(current);
117
118 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
119
120 return 1;
121}
122
123#ifdef CONFIG_ALTIVEC
124
125void enable_kernel_altivec(void)
126{
127 WARN_ON(preemptible());
128
129#ifdef CONFIG_SMP
130 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
131 giveup_altivec(current);
132 else
133 giveup_altivec(NULL); /* just enables AltiVec for kernel */
134#else
135 giveup_altivec(last_task_used_altivec);
136#endif /* CONFIG_SMP */
137}
138EXPORT_SYMBOL(enable_kernel_altivec);
139
140/*
141 * Make sure the VMX/Altivec register state in the
142 * thread_struct is up to date for task tsk.
143 */
144void flush_altivec_to_thread(struct task_struct *tsk)
145{
146 if (tsk->thread.regs) {
147 preempt_disable();
148 if (tsk->thread.regs->msr & MSR_VEC) {
149#ifdef CONFIG_SMP
150 BUG_ON(tsk != current);
151#endif
152 giveup_altivec(current);
153 }
154 preempt_enable();
155 }
156}
157
158int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
159{
160 flush_altivec_to_thread(current);
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
162 return 1;
163}
164
165#endif /* CONFIG_ALTIVEC */
166
167static void set_dabr_spr(unsigned long val)
168{
169 mtspr(SPRN_DABR, val);
170}
171
172int set_dabr(unsigned long dabr)
173{
174 int ret = 0;
175
176 if (firmware_has_feature(FW_FEATURE_XDABR)) {
177 /* We want to catch accesses from kernel and userspace */
178 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
179 ret = plpar_set_xdabr(dabr, flags);
180 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
181 ret = plpar_set_dabr(dabr);
182 } else {
183 set_dabr_spr(dabr);
184 }
185
186 return ret;
187}
188
189DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
190static DEFINE_PER_CPU(unsigned long, current_dabr);
191
192struct task_struct *__switch_to(struct task_struct *prev,
193 struct task_struct *new)
194{
195 struct thread_struct *new_thread, *old_thread;
196 unsigned long flags;
197 struct task_struct *last;
198
199#ifdef CONFIG_SMP
200 /* avoid complexity of lazy save/restore of fpu
201 * by just saving it every time we switch out if
202 * this task used the fpu during the last quantum.
203 *
204 * If it tries to use the fpu again, it'll trap and
205 * reload its fp regs. So we don't have to do a restore
206 * every switch, just a save.
207 * -- Cort
208 */
209 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
210 giveup_fpu(prev);
211#ifdef CONFIG_ALTIVEC
212 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
213 giveup_altivec(prev);
214#endif /* CONFIG_ALTIVEC */
215#endif /* CONFIG_SMP */
216
217#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP)
218 /* Avoid the trap. On smp this never happens since
219 * we don't set last_task_used_altivec -- Cort
220 */
221 if (new->thread.regs && last_task_used_altivec == new)
222 new->thread.regs->msr |= MSR_VEC;
223#endif /* CONFIG_ALTIVEC */
224
225 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
226 set_dabr(new->thread.dabr);
227 __get_cpu_var(current_dabr) = new->thread.dabr;
228 }
229
230 flush_tlb_pending();
231
232 new_thread = &new->thread;
233 old_thread = &current->thread;
234
235 /* Collect PURR utilization data per process and per processor.
236 * The PURR is essentially just the processor time base.
237 */
238 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
239 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
240 long unsigned start_tb, current_tb;
241 start_tb = old_thread->start_tb;
242 cu->current_tb = current_tb = mfspr(SPRN_PURR);
243 old_thread->accum_tb += (current_tb - start_tb);
244 new_thread->start_tb = current_tb;
245 }
246
247 local_irq_save(flags);
248 last = _switch(old_thread, new_thread);
249
250 local_irq_restore(flags);
251
252 return last;
253}
254
255static int instructions_to_print = 16;
256
257static void show_instructions(struct pt_regs *regs)
258{
259 int i;
260 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
261 sizeof(int));
262
263 printk("Instruction dump:");
264
265 for (i = 0; i < instructions_to_print; i++) {
266 int instr;
267
268 if (!(i % 8))
269 printk("\n");
270
271 if (((REGION_ID(pc) != KERNEL_REGION_ID) &&
272 (REGION_ID(pc) != VMALLOC_REGION_ID)) ||
273 __get_user(instr, (unsigned int *)pc)) {
274 printk("XXXXXXXX ");
275 } else {
276 if (regs->nip == pc)
277 printk("<%08x> ", instr);
278 else
279 printk("%08x ", instr);
280 }
281
282 pc += sizeof(int);
283 }
284
285 printk("\n");
286}
287
288void show_regs(struct pt_regs * regs)
289{
290 int i;
291 unsigned long trap;
292
293 printk("NIP: %016lX XER: %08X LR: %016lX CTR: %016lX\n",
294 regs->nip, (unsigned int)regs->xer, regs->link, regs->ctr);
295 printk("REGS: %p TRAP: %04lx %s (%s)\n",
296 regs, regs->trap, print_tainted(), system_utsname.release);
297 printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x "
298 "IR/DR: %01x%01x CR: %08X\n",
299 regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
300 regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
301 regs->msr&MSR_IR ? 1 : 0,
302 regs->msr&MSR_DR ? 1 : 0,
303 (unsigned int)regs->ccr);
304 trap = TRAP(regs);
305 printk("DAR: %016lx DSISR: %016lx\n", regs->dar, regs->dsisr);
306 printk("TASK: %p[%d] '%s' THREAD: %p",
307 current, current->pid, current->comm, current->thread_info);
308
309#ifdef CONFIG_SMP
310 printk(" CPU: %d", smp_processor_id());
311#endif /* CONFIG_SMP */
312
313 for (i = 0; i < 32; i++) {
314 if ((i % 4) == 0) {
315 printk("\n" KERN_INFO "GPR%02d: ", i);
316 }
317
318 printk("%016lX ", regs->gpr[i]);
319 if (i == 13 && !FULL_REGS(regs))
320 break;
321 }
322 printk("\n");
323 /*
324 * Look up the NIP late so we have the best chance of getting the
325 * above info out without failing
326 */
327 printk("NIP [%016lx] ", regs->nip);
328 print_symbol("%s\n", regs->nip);
329 printk("LR [%016lx] ", regs->link);
330 print_symbol("%s\n", regs->link);
331 show_stack(current, (unsigned long *)regs->gpr[1]);
332 if (!user_mode(regs))
333 show_instructions(regs);
334}
335
336void exit_thread(void)
337{
338 kprobe_flush_task(current);
339
340#ifndef CONFIG_SMP
341 if (last_task_used_math == current)
342 last_task_used_math = NULL;
343#ifdef CONFIG_ALTIVEC
344 if (last_task_used_altivec == current)
345 last_task_used_altivec = NULL;
346#endif /* CONFIG_ALTIVEC */
347#endif /* CONFIG_SMP */
348}
349
350void flush_thread(void)
351{
352 struct thread_info *t = current_thread_info();
353
354 kprobe_flush_task(current);
355 if (t->flags & _TIF_ABI_PENDING)
356 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
357
358#ifndef CONFIG_SMP
359 if (last_task_used_math == current)
360 last_task_used_math = NULL;
361#ifdef CONFIG_ALTIVEC
362 if (last_task_used_altivec == current)
363 last_task_used_altivec = NULL;
364#endif /* CONFIG_ALTIVEC */
365#endif /* CONFIG_SMP */
366
367 if (current->thread.dabr) {
368 current->thread.dabr = 0;
369 set_dabr(0);
370 }
371}
372
373void
374release_thread(struct task_struct *t)
375{
376}
377
378
379/*
380 * This gets called before we allocate a new thread and copy
381 * the current task into it.
382 */
383void prepare_to_copy(struct task_struct *tsk)
384{
385 flush_fp_to_thread(current);
386 flush_altivec_to_thread(current);
387}
388
389/*
390 * Copy a thread..
391 */
392int
393copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
394 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
395{
396 struct pt_regs *childregs, *kregs;
397 extern void ret_from_fork(void);
398 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
399
400 /* Copy registers */
401 sp -= sizeof(struct pt_regs);
402 childregs = (struct pt_regs *) sp;
403 *childregs = *regs;
404 if ((childregs->msr & MSR_PR) == 0) {
405 /* for kernel thread, set stackptr in new task */
406 childregs->gpr[1] = sp + sizeof(struct pt_regs);
407 p->thread.regs = NULL; /* no user register state */
408 clear_ti_thread_flag(p->thread_info, TIF_32BIT);
409 } else {
410 childregs->gpr[1] = usp;
411 p->thread.regs = childregs;
412 if (clone_flags & CLONE_SETTLS) {
413 if (test_thread_flag(TIF_32BIT))
414 childregs->gpr[2] = childregs->gpr[6];
415 else
416 childregs->gpr[13] = childregs->gpr[6];
417 }
418 }
419 childregs->gpr[3] = 0; /* Result from fork() */
420 sp -= STACK_FRAME_OVERHEAD;
421
422 /*
423 * The way this works is that at some point in the future
424 * some task will call _switch to switch to the new task.
425 * That will pop off the stack frame created below and start
426 * the new task running at ret_from_fork. The new task will
427 * do some housekeeping and then return from the fork or clone
428 * system call, using the stack frame created above.
429 */
430 sp -= sizeof(struct pt_regs);
431 kregs = (struct pt_regs *) sp;
432 sp -= STACK_FRAME_OVERHEAD;
433 p->thread.ksp = sp;
434 if (cpu_has_feature(CPU_FTR_SLB)) {
435 unsigned long sp_vsid = get_kernel_vsid(sp);
436
437 sp_vsid <<= SLB_VSID_SHIFT;
438 sp_vsid |= SLB_VSID_KERNEL;
439 if (cpu_has_feature(CPU_FTR_16M_PAGE))
440 sp_vsid |= SLB_VSID_L;
441
442 p->thread.ksp_vsid = sp_vsid;
443 }
444
445 /*
446 * The PPC64 ABI makes use of a TOC to contain function
447 * pointers. The function (ret_from_fork) is actually a pointer
448 * to the TOC entry. The first entry is a pointer to the actual
449 * function.
450 */
451 kregs->nip = *((unsigned long *)ret_from_fork);
452
453 return 0;
454}
455
456/*
457 * Set up a thread for executing a new program
458 */
459void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
460{
461 unsigned long entry, toc, load_addr = regs->gpr[2];
462
463 /* fdptr is a relocated pointer to the function descriptor for
464 * the elf _start routine. The first entry in the function
465 * descriptor is the entry address of _start and the second
466 * entry is the TOC value we need to use.
467 */
468 set_fs(USER_DS);
469 __get_user(entry, (unsigned long __user *)fdptr);
470 __get_user(toc, (unsigned long __user *)fdptr+1);
471
472 /* Check whether the e_entry function descriptor entries
473 * need to be relocated before we can use them.
474 */
475 if (load_addr != 0) {
476 entry += load_addr;
477 toc += load_addr;
478 }
479
480 /*
481 * If we exec out of a kernel thread then thread.regs will not be
482 * set. Do it now.
483 */
484 if (!current->thread.regs) {
485 unsigned long childregs = (unsigned long)current->thread_info +
486 THREAD_SIZE;
487 childregs -= sizeof(struct pt_regs);
488 current->thread.regs = (struct pt_regs *)childregs;
489 }
490
491 regs->nip = entry;
492 regs->gpr[1] = sp;
493 regs->gpr[2] = toc;
494 regs->msr = MSR_USER64;
495#ifndef CONFIG_SMP
496 if (last_task_used_math == current)
497 last_task_used_math = 0;
498#endif /* CONFIG_SMP */
499 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
500 current->thread.fpscr = 0;
501#ifdef CONFIG_ALTIVEC
502#ifndef CONFIG_SMP
503 if (last_task_used_altivec == current)
504 last_task_used_altivec = 0;
505#endif /* CONFIG_SMP */
506 memset(current->thread.vr, 0, sizeof(current->thread.vr));
507 current->thread.vscr.u[0] = 0;
508 current->thread.vscr.u[1] = 0;
509 current->thread.vscr.u[2] = 0;
510 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
511 current->thread.vrsave = 0;
512 current->thread.used_vr = 0;
513#endif /* CONFIG_ALTIVEC */
514}
515EXPORT_SYMBOL(start_thread);
516
517int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
518{
519 struct pt_regs *regs = tsk->thread.regs;
520
521 if (val > PR_FP_EXC_PRECISE)
522 return -EINVAL;
523 tsk->thread.fpexc_mode = __pack_fe01(val);
524 if (regs != NULL && (regs->msr & MSR_FP) != 0)
525 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
526 | tsk->thread.fpexc_mode;
527 return 0;
528}
529
530int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
531{
532 unsigned int val;
533
534 val = __unpack_fe01(tsk->thread.fpexc_mode);
535 return put_user(val, (unsigned int __user *) adr);
536}
537
538int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3,
539 unsigned long p4, unsigned long p5, unsigned long p6,
540 struct pt_regs *regs)
541{
542 unsigned long parent_tidptr = 0;
543 unsigned long child_tidptr = 0;
544
545 if (p2 == 0)
546 p2 = regs->gpr[1]; /* stack pointer for child */
547
548 if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_SETTID |
549 CLONE_CHILD_CLEARTID)) {
550 parent_tidptr = p3;
551 child_tidptr = p5;
552 if (test_thread_flag(TIF_32BIT)) {
553 parent_tidptr &= 0xffffffff;
554 child_tidptr &= 0xffffffff;
555 }
556 }
557
558 return do_fork(clone_flags, p2, regs, 0,
559 (int __user *)parent_tidptr, (int __user *)child_tidptr);
560}
561
562int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
563 unsigned long p4, unsigned long p5, unsigned long p6,
564 struct pt_regs *regs)
565{
566 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
567}
568
569int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
570 unsigned long p4, unsigned long p5, unsigned long p6,
571 struct pt_regs *regs)
572{
573 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0,
574 NULL, NULL);
575}
576
577int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
578 unsigned long a3, unsigned long a4, unsigned long a5,
579 struct pt_regs *regs)
580{
581 int error;
582 char * filename;
583
584 filename = getname((char __user *) a0);
585 error = PTR_ERR(filename);
586 if (IS_ERR(filename))
587 goto out;
588 flush_fp_to_thread(current);
589 flush_altivec_to_thread(current);
590 error = do_execve(filename, (char __user * __user *) a1,
591 (char __user * __user *) a2, regs);
592
593 if (error == 0) {
594 task_lock(current);
595 current->ptrace &= ~PT_DTRACE;
596 task_unlock(current);
597 }
598 putname(filename);
599
600out:
601 return error;
602}
603
604static int kstack_depth_to_print = 64;
605
606static int validate_sp(unsigned long sp, struct task_struct *p,
607 unsigned long nbytes)
608{
609 unsigned long stack_page = (unsigned long)p->thread_info;
610
611 if (sp >= stack_page + sizeof(struct thread_struct)
612 && sp <= stack_page + THREAD_SIZE - nbytes)
613 return 1;
614
615#ifdef CONFIG_IRQSTACKS
616 stack_page = (unsigned long) hardirq_ctx[task_cpu(p)];
617 if (sp >= stack_page + sizeof(struct thread_struct)
618 && sp <= stack_page + THREAD_SIZE - nbytes)
619 return 1;
620
621 stack_page = (unsigned long) softirq_ctx[task_cpu(p)];
622 if (sp >= stack_page + sizeof(struct thread_struct)
623 && sp <= stack_page + THREAD_SIZE - nbytes)
624 return 1;
625#endif
626
627 return 0;
628}
629
630unsigned long get_wchan(struct task_struct *p)
631{
632 unsigned long ip, sp;
633 int count = 0;
634
635 if (!p || p == current || p->state == TASK_RUNNING)
636 return 0;
637
638 sp = p->thread.ksp;
639 if (!validate_sp(sp, p, 112))
640 return 0;
641
642 do {
643 sp = *(unsigned long *)sp;
644 if (!validate_sp(sp, p, 112))
645 return 0;
646 if (count > 0) {
647 ip = *(unsigned long *)(sp + 16);
648 if (!in_sched_functions(ip))
649 return ip;
650 }
651 } while (count++ < 16);
652 return 0;
653}
654EXPORT_SYMBOL(get_wchan);
655
656void show_stack(struct task_struct *p, unsigned long *_sp)
657{
658 unsigned long ip, newsp, lr;
659 int count = 0;
660 unsigned long sp = (unsigned long)_sp;
661 int firstframe = 1;
662
663 if (sp == 0) {
664 if (p) {
665 sp = p->thread.ksp;
666 } else {
667 sp = __get_SP();
668 p = current;
669 }
670 }
671
672 lr = 0;
673 printk("Call Trace:\n");
674 do {
675 if (!validate_sp(sp, p, 112))
676 return;
677
678 _sp = (unsigned long *) sp;
679 newsp = _sp[0];
680 ip = _sp[2];
681 if (!firstframe || ip != lr) {
682 printk("[%016lx] [%016lx] ", sp, ip);
683 print_symbol("%s", ip);
684 if (firstframe)
685 printk(" (unreliable)");
686 printk("\n");
687 }
688 firstframe = 0;
689
690 /*
691 * See if this is an exception frame.
692 * We look for the "regshere" marker in the current frame.
693 */
694 if (validate_sp(sp, p, sizeof(struct pt_regs) + 400)
695 && _sp[12] == 0x7265677368657265ul) {
696 struct pt_regs *regs = (struct pt_regs *)
697 (sp + STACK_FRAME_OVERHEAD);
698 printk("--- Exception: %lx", regs->trap);
699 print_symbol(" at %s\n", regs->nip);
700 lr = regs->link;
701 print_symbol(" LR = %s\n", lr);
702 firstframe = 1;
703 }
704
705 sp = newsp;
706 } while (count++ < kstack_depth_to_print);
707}
708
709void dump_stack(void)
710{
711 show_stack(current, (unsigned long *)__get_SP());
712}
713EXPORT_SYMBOL(dump_stack);
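
copy_thread() and start_thread() above depend on the ppc64 ELFv1 convention
that a function symbol names a function descriptor rather than code: the
descriptor's first doubleword is the entry address and the second is the TOC
(r2) value, which is why copy_thread() does
kregs->nip = *((unsigned long *)ret_from_fork). Below is a minimal
user-space sketch of that layout; the descriptor values are invented, and it
only models the convention, it is not kernel code.

#include <stdio.h>

struct func_desc {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC (r2) value for the function */
	unsigned long env;	/* environment pointer, unused by C */
};

int main(void)
{
	/* Made-up descriptor values, purely for illustration. */
	struct func_desc fd = { 0x10000000UL, 0x10080000UL, 0UL };

	/* This mirrors the dereference copy_thread() performs:
	 * the first doubleword of the descriptor is the entry point,
	 * the second is the TOC value start_thread() loads into r2.
	 */
	unsigned long nip = *(unsigned long *)&fd;
	unsigned long toc = ((unsigned long *)&fd)[1];

	printf("entry=%#lx toc=%#lx\n", nip, toc);
	return 0;
}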
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 7035deb6de92..97bfceb5353b 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -46,7 +46,6 @@
 #include <asm/pgtable.h>
 #include <asm/pci.h>
 #include <asm/iommu.h>
-#include <asm/bootinfo.h>
 #include <asm/ppcdebug.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
@@ -78,11 +77,14 @@ typedef int interpret_func(struct device_node *, unsigned long *,
 extern struct rtas_t rtas;
 extern struct lmb lmb;
 extern unsigned long klimit;
+extern unsigned long memory_limit;
 
 static int __initdata dt_root_addr_cells;
 static int __initdata dt_root_size_cells;
 static int __initdata iommu_is_off;
 int __initdata iommu_force_on;
+unsigned long tce_alloc_start, tce_alloc_end;
+
 typedef u32 cell_t;
 
 #if 0
@@ -1063,7 +1065,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
 {
 	u32 *prop;
 	u64 *prop64;
-	extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
 
 	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
@@ -1237,7 +1238,7 @@ void __init early_init_devtree(void *params)
 	lmb_init();
 	scan_flat_dt(early_init_dt_scan_root, NULL);
 	scan_flat_dt(early_init_dt_scan_memory, NULL);
-	lmb_enforce_memory_limit();
+	lmb_enforce_memory_limit(memory_limit);
 	lmb_analyze();
 	systemcfg->physicalMemorySize = lmb_phys_mem_size();
 	lmb_reserve(0, __pa(klimit));
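
The prom.c hunk above changes lmb_enforce_memory_limit() to take the limit
as an argument instead of reading the global from inside lmb.c. A hedged
sketch of the truncation idea follows, with invented types; the real routine
in arch/ppc64/kernel/lmb.c also fixes up reserved regions and totals.

#include <stdio.h>

struct region { unsigned long base, size; };

/* Truncate a flat list of memory regions at 'limit' bytes of usable
 * memory; limit == 0 means no mem= limit was given.
 */
static void enforce_limit(struct region *r, int n, unsigned long limit)
{
	unsigned long used = 0;
	int i;

	if (limit == 0)
		return;
	for (i = 0; i < n; i++) {
		if (used + r[i].size > limit)
			r[i].size = (limit > used) ? limit - used : 0;
		used += r[i].size;
	}
}

int main(void)
{
	struct region r[2] = { { 0, 0x4000000 }, { 0x4000000, 0x4000000 } };

	enforce_limit(r, 2, 0x6000000);	/* as if mem=96M had been passed */
	printf("sizes: %#lx %#lx\n", r[0].size, r[1].size);
	return 0;
}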
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index f252670874a4..69924ba4d7d9 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -44,7 +44,6 @@
 #include <asm/pgtable.h>
 #include <asm/pci.h>
 #include <asm/iommu.h>
-#include <asm/bootinfo.h>
 #include <asm/ppcdebug.h>
 #include <asm/btext.h>
 #include <asm/sections.h>
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
deleted file mode 100644
index b1c044ca5756..000000000000
--- a/arch/ppc64/kernel/ptrace.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/audit.h>
31#include <linux/seccomp.h>
32#include <linux/signal.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/pgtable.h>
37#include <asm/system.h>
38#include <asm/ptrace-common.h>
39
40/*
41 * does not yet catch signals sent when the child dies;
42 * that is handled in exit.c or in signal.c.
43 */
44
45/*
46 * Called by kernel/ptrace.c when detaching..
47 *
48 * Make sure single step bits etc are not set.
49 */
50void ptrace_disable(struct task_struct *child)
51{
52 /* make sure the single step bit is not set. */
53 clear_single_step(child);
54}
55
56int sys_ptrace(long request, long pid, long addr, long data)
57{
58 struct task_struct *child;
59 int ret = -EPERM;
60
61 lock_kernel();
62 if (request == PTRACE_TRACEME) {
63 /* are we already being traced? */
64 if (current->ptrace & PT_PTRACED)
65 goto out;
66 ret = security_ptrace(current->parent, current);
67 if (ret)
68 goto out;
69 /* set the ptrace bit in the process flags. */
70 current->ptrace |= PT_PTRACED;
71 ret = 0;
72 goto out;
73 }
74 ret = -ESRCH;
75 read_lock(&tasklist_lock);
76 child = find_task_by_pid(pid);
77 if (child)
78 get_task_struct(child);
79 read_unlock(&tasklist_lock);
80 if (!child)
81 goto out;
82
83 ret = -EPERM;
84 if (pid == 1) /* you may not mess with init */
85 goto out_tsk;
86
87 if (request == PTRACE_ATTACH) {
88 ret = ptrace_attach(child);
89 goto out_tsk;
90 }
91
92 ret = ptrace_check_attach(child, request == PTRACE_KILL);
93 if (ret < 0)
94 goto out_tsk;
95
96 switch (request) {
97 /* when I and D space are separate, these will need to be fixed. */
98 case PTRACE_PEEKTEXT: /* read word at location addr. */
99 case PTRACE_PEEKDATA: {
100 unsigned long tmp;
101 int copied;
102
103 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
104 ret = -EIO;
105 if (copied != sizeof(tmp))
106 break;
107 ret = put_user(tmp,(unsigned long __user *) data);
108 break;
109 }
110
111 /* read the word at location addr in the USER area. */
112 case PTRACE_PEEKUSR: {
113 unsigned long index;
114 unsigned long tmp;
115
116 ret = -EIO;
117 /* convert to index and check */
118 index = (unsigned long) addr >> 3;
119 if ((addr & 7) || (index > PT_FPSCR))
120 break;
121
122 if (index < PT_FPR0) {
123 tmp = get_reg(child, (int)index);
124 } else {
125 flush_fp_to_thread(child);
126 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
127 }
128 ret = put_user(tmp,(unsigned long __user *) data);
129 break;
130 }
131
132 /* If I and D space are separate, this will have to be fixed. */
133 case PTRACE_POKETEXT: /* write the word at location addr. */
134 case PTRACE_POKEDATA:
135 ret = 0;
136 if (access_process_vm(child, addr, &data, sizeof(data), 1)
137 == sizeof(data))
138 break;
139 ret = -EIO;
140 break;
141
142 /* write the word at location addr in the USER area */
143 case PTRACE_POKEUSR: {
144 unsigned long index;
145
146 ret = -EIO;
147 /* convert to index and check */
148 index = (unsigned long) addr >> 3;
149 if ((addr & 7) || (index > PT_FPSCR))
150 break;
151
152 if (index == PT_ORIG_R3)
153 break;
154 if (index < PT_FPR0) {
155 ret = put_reg(child, index, data);
156 } else {
157 flush_fp_to_thread(child);
158 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
159 ret = 0;
160 }
161 break;
162 }
163
164 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
165 case PTRACE_CONT: { /* restart after signal. */
166 ret = -EIO;
167 if (!valid_signal(data))
168 break;
169 if (request == PTRACE_SYSCALL)
170 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
171 else
172 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
173 child->exit_code = data;
174 /* make sure the single step bit is not set. */
175 clear_single_step(child);
176 wake_up_process(child);
177 ret = 0;
178 break;
179 }
180
181 /*
182 * make the child exit. Best I can do is send it a sigkill.
183 * perhaps it should be put in the status that it wants to
184 * exit.
185 */
186 case PTRACE_KILL: {
187 ret = 0;
188 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
189 break;
190 child->exit_code = SIGKILL;
191 /* make sure the single step bit is not set. */
192 clear_single_step(child);
193 wake_up_process(child);
194 break;
195 }
196
197 case PTRACE_SINGLESTEP: { /* set the trap flag. */
198 ret = -EIO;
199 if (!valid_signal(data))
200 break;
201 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 set_single_step(child);
203 child->exit_code = data;
204 /* give it a chance to run. */
205 wake_up_process(child);
206 ret = 0;
207 break;
208 }
209
210 case PTRACE_GET_DEBUGREG: {
211 ret = -EINVAL;
212 /* We only support one DABR and no IABRS at the moment */
213 if (addr > 0)
214 break;
215 ret = put_user(child->thread.dabr,
216 (unsigned long __user *)data);
217 break;
218 }
219
220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data);
222 break;
223
224 case PTRACE_DETACH:
225 ret = ptrace_detach(child, data);
226 break;
227
228 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
229 int i;
230 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
231 unsigned long __user *tmp = (unsigned long __user *)addr;
232
233 for (i = 0; i < 32; i++) {
234 ret = put_user(*reg, tmp);
235 if (ret)
236 break;
237 reg++;
238 tmp++;
239 }
240 break;
241 }
242
243 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
244 int i;
245 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
246 unsigned long __user *tmp = (unsigned long __user *)addr;
247
248 for (i = 0; i < 32; i++) {
249 ret = get_user(*reg, tmp);
250 if (ret)
251 break;
252 reg++;
253 tmp++;
254 }
255 break;
256 }
257
258 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
259 int i;
260 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
261 unsigned long __user *tmp = (unsigned long __user *)addr;
262
263 flush_fp_to_thread(child);
264
265 for (i = 0; i < 32; i++) {
266 ret = put_user(*reg, tmp);
267 if (ret)
268 break;
269 reg++;
270 tmp++;
271 }
272 break;
273 }
274
275 case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
276 int i;
277 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
278 unsigned long __user *tmp = (unsigned long __user *)addr;
279
280 flush_fp_to_thread(child);
281
282 for (i = 0; i < 32; i++) {
283 ret = get_user(*reg, tmp);
284 if (ret)
285 break;
286 reg++;
287 tmp++;
288 }
289 break;
290 }
291
292#ifdef CONFIG_ALTIVEC
293 case PTRACE_GETVRREGS:
294 /* Get the child altivec register state. */
295 flush_altivec_to_thread(child);
296 ret = get_vrregs((unsigned long __user *)data, child);
297 break;
298
299 case PTRACE_SETVRREGS:
300 /* Set the child altivec register state. */
301 flush_altivec_to_thread(child);
302 ret = set_vrregs(child, (unsigned long __user *)data);
303 break;
304#endif
305
306 default:
307 ret = ptrace_request(child, request, addr, data);
308 break;
309 }
310out_tsk:
311 put_task_struct(child);
312out:
313 unlock_kernel();
314 return ret;
315}
316
317static void do_syscall_trace(void)
318{
319 /* the 0x80 provides a way for the tracing parent to distinguish
320 between a syscall stop and SIGTRAP delivery */
321 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
322 ? 0x80 : 0));
323
324 /*
325 * this isn't the same as continuing with a signal, but it will do
326 * for normal use. strace only continues with a signal if the
327 * stopping signal is not SIGTRAP. -brl
328 */
329 if (current->exit_code) {
330 send_sig(current->exit_code, current, 1);
331 current->exit_code = 0;
332 }
333}
334
335void do_syscall_trace_enter(struct pt_regs *regs)
336{
337 secure_computing(regs->gpr[0]);
338
339 if (test_thread_flag(TIF_SYSCALL_TRACE)
340 && (current->ptrace & PT_PTRACED))
341 do_syscall_trace();
342
343 if (unlikely(current->audit_context))
344 audit_syscall_entry(current,
345 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
346 regs->gpr[0],
347 regs->gpr[3], regs->gpr[4],
348 regs->gpr[5], regs->gpr[6]);
349
350}
351
352void do_syscall_trace_leave(struct pt_regs *regs)
353{
354 if (unlikely(current->audit_context))
355 audit_syscall_exit(current,
356 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
357 regs->result);
358
359 if ((test_thread_flag(TIF_SYSCALL_TRACE)
360 || test_thread_flag(TIF_SINGLESTEP))
361 && (current->ptrace & PT_PTRACED))
362 do_syscall_trace();
363}
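
For the PTRACE_PEEKUSR path above, the kernel forms an index with
addr >> 3 and rejects offsets that are unaligned or past PT_FPSCR, so a
64-bit tracer passes reg * 8 as the address. A compile-only user-space
sketch of the call pattern (it assumes the caller has already attached to
and waited on the tracee; glibc spells the request PTRACE_PEEKUSER):

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static long peek_gpr(pid_t pid, int reg)
{
	errno = 0;
	/* addr = reg * sizeof(unsigned long); must be 8-byte aligned */
	long val = ptrace(PTRACE_PEEKUSER, pid, (void *)(reg * 8L), NULL);
	if (errno)
		perror("PTRACE_PEEKUSER");
	return val;
}

/* usage: long r1 = peek_gpr(pid, 1);  -- GPR1 (stack pointer), offset 1*8 */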
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/ppc64/kernel/ptrace32.c
deleted file mode 100644
index fb8c22d6084a..000000000000
--- a/arch/ppc64/kernel/ptrace32.c
+++ /dev/null
@@ -1,449 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace32.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/signal.h>
31
32#include <asm/uaccess.h>
33#include <asm/page.h>
34#include <asm/pgtable.h>
35#include <asm/system.h>
36#include <asm/ptrace-common.h>
37
38/*
39 * does not yet catch signals sent when the child dies;
40 * that is handled in exit.c or in signal.c.
41 */
42
43int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
44{
45 struct task_struct *child;
46 int ret = -EPERM;
47
48 lock_kernel();
49 if (request == PTRACE_TRACEME) {
50 /* are we already being traced? */
51 if (current->ptrace & PT_PTRACED)
52 goto out;
53 ret = security_ptrace(current->parent, current);
54 if (ret)
55 goto out;
56 /* set the ptrace bit in the process flags. */
57 current->ptrace |= PT_PTRACED;
58 ret = 0;
59 goto out;
60 }
61 ret = -ESRCH;
62 read_lock(&tasklist_lock);
63 child = find_task_by_pid(pid);
64 if (child)
65 get_task_struct(child);
66 read_unlock(&tasklist_lock);
67 if (!child)
68 goto out;
69
70 ret = -EPERM;
71 if (pid == 1) /* you may not mess with init */
72 goto out_tsk;
73
74 if (request == PTRACE_ATTACH) {
75 ret = ptrace_attach(child);
76 goto out_tsk;
77 }
78
79 ret = ptrace_check_attach(child, request == PTRACE_KILL);
80 if (ret < 0)
81 goto out_tsk;
82
83 switch (request) {
84 /* when I and D space are separate, these will need to be fixed. */
85 case PTRACE_PEEKTEXT: /* read word at location addr. */
86 case PTRACE_PEEKDATA: {
87 unsigned int tmp;
88 int copied;
89
90 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
91 ret = -EIO;
92 if (copied != sizeof(tmp))
93 break;
94 ret = put_user(tmp, (u32 __user *)data);
95 break;
96 }
97
98 /*
99 * Read 4 bytes of the other process' storage
100 * data is a pointer specifying where the user wants the
101 * 4 bytes copied into
102 * addr is a pointer in the user's storage that contains an 8 byte
103 * address in the other process of the 4 bytes that is to be read
104 * (this is run in a 32-bit process looking at a 64-bit process)
105 * when I and D space are separate, these will need to be fixed.
106 */
107 case PPC_PTRACE_PEEKTEXT_3264:
108 case PPC_PTRACE_PEEKDATA_3264: {
109 u32 tmp;
110 int copied;
111 u32 __user * addrOthers;
112
113 ret = -EIO;
114
115 /* Get the addr in the other process that we want to read */
116 if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
117 break;
118
119 copied = access_process_vm(child, (u64)addrOthers, &tmp,
120 sizeof(tmp), 0);
121 if (copied != sizeof(tmp))
122 break;
123 ret = put_user(tmp, (u32 __user *)data);
124 break;
125 }
126
127 /* Read a register (specified by ADDR) out of the "user area" */
128 case PTRACE_PEEKUSR: {
129 int index;
130 unsigned long tmp;
131
132 ret = -EIO;
133 /* convert to index and check */
134 index = (unsigned long) addr >> 2;
135 if ((addr & 3) || (index > PT_FPSCR32))
136 break;
137
138 if (index < PT_FPR0) {
139 tmp = get_reg(child, index);
140 } else {
141 flush_fp_to_thread(child);
142 /*
143 * the user space code considers the floating point
144 * to be an array of unsigned int (32 bits) - the
145 * index passed in is based on this assumption.
146 */
147 tmp = ((unsigned int *)child->thread.fpr)[index - PT_FPR0];
148 }
149 ret = put_user((unsigned int)tmp, (u32 __user *)data);
150 break;
151 }
152
153 /*
154 * Read 4 bytes out of the other process' pt_regs area
155 * data is a pointer specifying where the user wants the
156 * 4 bytes copied into
157 * addr is the offset into the other process' pt_regs structure
158 * that is to be read
159 * (this is run in a 32-bit process looking at a 64-bit process)
160 */
161 case PPC_PTRACE_PEEKUSR_3264: {
162 u32 index;
163 u32 reg32bits;
164 u64 tmp;
165 u32 numReg;
166 u32 part;
167
168 ret = -EIO;
169 /* Determine which register the user wants */
170 index = (u64)addr >> 2;
171 numReg = index / 2;
172 /* Determine which part of the register the user wants */
173 if (index % 2)
174 part = 1; /* want the 2nd half of the register (right-most). */
175 else
176 part = 0; /* want the 1st half of the register (left-most). */
177
178 /* Validate the input - check to see if address is on the wrong boundary or beyond the end of the user area */
179 if ((addr & 3) || numReg > PT_FPSCR)
180 break;
181
182 if (numReg >= PT_FPR0) {
183 flush_fp_to_thread(child);
184 tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
185 } else { /* register within PT_REGS struct */
186 tmp = get_reg(child, numReg);
187 }
188 reg32bits = ((u32*)&tmp)[part];
189 ret = put_user(reg32bits, (u32 __user *)data);
190 break;
191 }
192
193 /* If I and D space are separate, this will have to be fixed. */
194 case PTRACE_POKETEXT: /* write the word at location addr. */
195 case PTRACE_POKEDATA: {
196 unsigned int tmp;
197 tmp = data;
198 ret = 0;
199 if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1)
200 == sizeof(tmp))
201 break;
202 ret = -EIO;
203 break;
204 }
205
206 /*
207 * Write 4 bytes into the other process' storage
208 * data is the 4 bytes that the user wants written
209 * addr is a pointer in the user's storage that contains an
210 * 8 byte address in the other process where the 4 bytes
211 * are to be written
212 * (this is run in a 32-bit process looking at a 64-bit process)
213 * when I and D space are separate, these will need to be fixed.
214 */
215 case PPC_PTRACE_POKETEXT_3264:
216 case PPC_PTRACE_POKEDATA_3264: {
217 u32 tmp = data;
218 u32 __user * addrOthers;
219
220 /* Get the addr in the other process that we want to write into */
221 ret = -EIO;
222 if (get_user(addrOthers, (u32 __user * __user *)addr) != 0)
223 break;
224 ret = 0;
225 if (access_process_vm(child, (u64)addrOthers, &tmp,
226 sizeof(tmp), 1) == sizeof(tmp))
227 break;
228 ret = -EIO;
229 break;
230 }
231
232 /* write the word at location addr in the USER area */
233 case PTRACE_POKEUSR: {
234 unsigned long index;
235
236 ret = -EIO;
237 /* convert to index and check */
238 index = (unsigned long) addr >> 2;
239 if ((addr & 3) || (index > PT_FPSCR32))
240 break;
241
242 if (index == PT_ORIG_R3)
243 break;
244 if (index < PT_FPR0) {
245 ret = put_reg(child, index, data);
246 } else {
247 flush_fp_to_thread(child);
248 /*
249 * the user space code considers the floating point
250 * to be an array of unsigned int (32 bits) - the
251 * index passed in is based on this assumption.
252 */
253 ((unsigned int *)child->thread.fpr)[index - PT_FPR0] = data;
254 ret = 0;
255 }
256 break;
257 }
258
259 /*
260 * Write 4 bytes into the other process' pt_regs area
261 * data is the 4 bytes that the user wants written
262 * addr is the offset into the other process' pt_regs structure
263 * that is to be written into
264 * (this is run in a 32-bit process looking at a 64-bit process)
265 */
266 case PPC_PTRACE_POKEUSR_3264: {
267 u32 index;
268 u32 numReg;
269
270 ret = -EIO;
271 /* Determine which register the user wants */
272 index = (u64)addr >> 2;
273 numReg = index / 2;
274 /*
275 * Validate the input - check to see if address is on the
276 * wrong boundary or beyond the end of the user area
277 */
278 if ((addr & 3) || (numReg > PT_FPSCR))
279 break;
280 /* Ensure it is a register we let them change */
281 if ((numReg == PT_ORIG_R3)
282 || ((numReg > PT_CCR) && (numReg < PT_FPR0)))
283 break;
284 if (numReg >= PT_FPR0) {
285 flush_fp_to_thread(child);
286 }
287 if (numReg == PT_MSR)
288 data = (data & MSR_DEBUGCHANGE)
289 | (child->thread.regs->msr & ~MSR_DEBUGCHANGE);
290 ((u32*)child->thread.regs)[index] = data;
291 ret = 0;
292 break;
293 }
294
295 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
296 case PTRACE_CONT: { /* restart after signal. */
297 ret = -EIO;
298 if (!valid_signal(data))
299 break;
300 if (request == PTRACE_SYSCALL)
301 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
302 else
303 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
304 child->exit_code = data;
305 /* make sure the single step bit is not set. */
306 clear_single_step(child);
307 wake_up_process(child);
308 ret = 0;
309 break;
310 }
311
312 /*
313 * make the child exit. Best I can do is send it a sigkill.
314 * perhaps it should be put in the status that it wants to
315 * exit.
316 */
317 case PTRACE_KILL: {
318 ret = 0;
319 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
320 break;
321 child->exit_code = SIGKILL;
322 /* make sure the single step bit is not set. */
323 clear_single_step(child);
324 wake_up_process(child);
325 break;
326 }
327
328 case PTRACE_SINGLESTEP: { /* set the trap flag. */
329 ret = -EIO;
330 if (!valid_signal(data))
331 break;
332 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
333 set_single_step(child);
334 child->exit_code = data;
335 /* give it a chance to run. */
336 wake_up_process(child);
337 ret = 0;
338 break;
339 }
340
341 case PTRACE_GET_DEBUGREG: {
342 ret = -EINVAL;
343 /* We only support one DABR and no IABRS at the moment */
344 if (addr > 0)
345 break;
346 ret = put_user(child->thread.dabr, (u32 __user *)data);
347 break;
348 }
349
350 case PTRACE_SET_DEBUGREG:
351 ret = ptrace_set_debugreg(child, addr, data);
352 break;
353
354 case PTRACE_DETACH:
355 ret = ptrace_detach(child, data);
356 break;
357
358 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
359 int i;
360 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
361 unsigned int __user *tmp = (unsigned int __user *)addr;
362
363 for (i = 0; i < 32; i++) {
364 ret = put_user(*reg, tmp);
365 if (ret)
366 break;
367 reg++;
368 tmp++;
369 }
370 break;
371 }
372
373 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
374 int i;
375 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
376 unsigned int __user *tmp = (unsigned int __user *)addr;
377
378 for (i = 0; i < 32; i++) {
379 ret = get_user(*reg, tmp);
380 if (ret)
381 break;
382 reg++;
383 tmp++;
384 }
385 break;
386 }
387
388 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
389 int i;
390 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
391 unsigned int __user *tmp = (unsigned int __user *)addr;
392
393 flush_fp_to_thread(child);
394
395 for (i = 0; i < 32; i++) {
396 ret = put_user(*reg, tmp);
397 if (ret)
398 break;
399 reg++;
400 tmp++;
401 }
402 break;
403 }
404
405 case PPC_PTRACE_SETFPREGS: { /* Set FPRs 0 - 31. */
406 int i;
407 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
408 unsigned int __user *tmp = (unsigned int __user *)addr;
409
410 flush_fp_to_thread(child);
411
412 for (i = 0; i < 32; i++) {
413 ret = get_user(*reg, tmp);
414 if (ret)
415 break;
416 reg++;
417 tmp++;
418 }
419 break;
420 }
421
422 case PTRACE_GETEVENTMSG:
423 ret = put_user(child->ptrace_message, (unsigned int __user *) data);
424 break;
425
426#ifdef CONFIG_ALTIVEC
427 case PTRACE_GETVRREGS:
428 /* Get the child altivec register state. */
429 flush_altivec_to_thread(child);
430 ret = get_vrregs((unsigned long __user *)data, child);
431 break;
432
433 case PTRACE_SETVRREGS:
434 /* Set the child altivec register state. */
435 flush_altivec_to_thread(child);
436 ret = set_vrregs(child, (unsigned long __user *)data);
437 break;
438#endif
439
440 default:
441 ret = ptrace_request(child, request, addr, data);
442 break;
443 }
444out_tsk:
445 put_task_struct(child);
446out:
447 unlock_kernel();
448 return ret;
449}
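
The PPC_PTRACE_PEEKUSR_3264 arithmetic above is worth unpacking: a 32-bit
tracer addresses 64-bit registers in 4-byte halves, so index = addr >> 2
selects a half-word slot, numReg = index / 2 selects the register, and
index % 2 selects which half; part 0 is the most-significant word because
ppc64 is big-endian. A small user-space model of just that selection
(register value is made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_half(uint64_t reg, unsigned long addr)
{
	unsigned int part = (addr >> 2) % 2;	/* 0 = upper, 1 = lower */

	/* On big-endian ppc64, ((u32 *)&reg)[0] is the upper word; this
	 * computes the same thing portably with shifts.
	 */
	return part ? (uint32_t)reg : (uint32_t)(reg >> 32);
}

int main(void)
{
	uint64_t msr = 0x8000000000001032ULL;	/* invented register value */

	printf("upper=%08x lower=%08x\n",
	       reg_half(msr, 0), reg_half(msr, 4));
	return 0;
}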
diff --git a/arch/ppc64/kernel/ras.c b/arch/ppc64/kernel/ras.c
deleted file mode 100644
index 41b97dc9cc0a..000000000000
--- a/arch/ppc64/kernel/ras.c
+++ /dev/null
@@ -1,353 +0,0 @@
1/*
2 * ras.c
3 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20/* Change Activity:
21 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
22 * End Change Activity
23 */
24
25#include <linux/errno.h>
26#include <linux/threads.h>
27#include <linux/kernel_stat.h>
28#include <linux/signal.h>
29#include <linux/sched.h>
30#include <linux/ioport.h>
31#include <linux/interrupt.h>
32#include <linux/timex.h>
33#include <linux/init.h>
34#include <linux/slab.h>
35#include <linux/pci.h>
36#include <linux/delay.h>
37#include <linux/irq.h>
38#include <linux/random.h>
39#include <linux/sysrq.h>
40#include <linux/bitops.h>
41
42#include <asm/uaccess.h>
43#include <asm/system.h>
44#include <asm/io.h>
45#include <asm/pgtable.h>
46#include <asm/irq.h>
47#include <asm/cache.h>
48#include <asm/prom.h>
49#include <asm/ptrace.h>
50#include <asm/machdep.h>
51#include <asm/rtas.h>
52#include <asm/ppcdebug.h>
53
54static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
55static DEFINE_SPINLOCK(ras_log_buf_lock);
56
57char mce_data_buf[RTAS_ERROR_LOG_MAX];
58
59/* This is true if we are using the firmware NMI handler (typically LPAR) */
60extern int fwnmi_active;
61
62static int ras_get_sensor_state_token;
63static int ras_check_exception_token;
64
65#define EPOW_SENSOR_TOKEN 9
66#define EPOW_SENSOR_INDEX 0
67#define RAS_VECTOR_OFFSET 0x500
68
69static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
70 struct pt_regs * regs);
71static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
72 struct pt_regs * regs);
73
74/* #define DEBUG */
75
76static void request_ras_irqs(struct device_node *np, char *propname,
77 irqreturn_t (*handler)(int, void *, struct pt_regs *),
78 const char *name)
79{
80 unsigned int *ireg, len, i;
81 int virq, n_intr;
82
83 ireg = (unsigned int *)get_property(np, propname, &len);
84 if (ireg == NULL)
85 return;
86 n_intr = prom_n_intr_cells(np);
87 len /= n_intr * sizeof(*ireg);
88
89 for (i = 0; i < len; i++) {
90 virq = virt_irq_create_mapping(*ireg);
91 if (virq == NO_IRQ) {
92 printk(KERN_ERR "Unable to allocate interrupt "
93 "number for %s\n", np->full_name);
94 return;
95 }
96 if (request_irq(irq_offset_up(virq), handler, 0, name, NULL)) {
97 printk(KERN_ERR "Unable to request interrupt %d for "
98 "%s\n", irq_offset_up(virq), np->full_name);
99 return;
100 }
101 ireg += n_intr;
102 }
103}
104
105/*
106 * Initialize handlers for the set of interrupts caused by hardware errors
107 * and power system events.
108 */
109static int __init init_ras_IRQ(void)
110{
111 struct device_node *np;
112
113 ras_get_sensor_state_token = rtas_token("get-sensor-state");
114 ras_check_exception_token = rtas_token("check-exception");
115
116 /* Internal Errors */
117 np = of_find_node_by_path("/event-sources/internal-errors");
118 if (np != NULL) {
119 request_ras_irqs(np, "open-pic-interrupt", ras_error_interrupt,
120 "RAS_ERROR");
121 request_ras_irqs(np, "interrupts", ras_error_interrupt,
122 "RAS_ERROR");
123 of_node_put(np);
124 }
125
126 /* EPOW Events */
127 np = of_find_node_by_path("/event-sources/epow-events");
128 if (np != NULL) {
129 request_ras_irqs(np, "open-pic-interrupt", ras_epow_interrupt,
130 "RAS_EPOW");
131 request_ras_irqs(np, "interrupts", ras_epow_interrupt,
132 "RAS_EPOW");
133 of_node_put(np);
134 }
135
136 return 1;
137}
138__initcall(init_ras_IRQ);
139
140/*
141 * Handle power subsystem events (EPOW).
142 *
143 * Presently we just log that the event has occurred. This should be fixed
144 * to examine the type of power failure and take appropriate action where
145 * the time horizon permits something useful to be done.
146 */
147static irqreturn_t
148ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
149{
150 int status = 0xdeadbeef;
151 int state = 0;
152 int critical;
153
154 status = rtas_call(ras_get_sensor_state_token, 2, 2, &state,
155 EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
156
157 if (state > 3)
158 critical = 1; /* Time Critical */
159 else
160 critical = 0;
161
162 spin_lock(&ras_log_buf_lock);
163
164 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
165 RAS_VECTOR_OFFSET,
166 virt_irq_to_real(irq_offset_down(irq)),
167 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS,
168 critical, __pa(&ras_log_buf),
169 rtas_get_error_log_max());
170
171 udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
172 *((unsigned long *)&ras_log_buf), status, state);
173 printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
174 *((unsigned long *)&ras_log_buf), status, state);
175
176 /* format and print the extended information */
177 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
178
179 spin_unlock(&ras_log_buf_lock);
180 return IRQ_HANDLED;
181}
182
183/*
184 * Handle hardware error interrupts.
185 *
186 * RTAS check-exception is called to collect data on the exception. If
187 * the error is deemed recoverable, we log a warning and return.
188 * For nonrecoverable errors, an error is logged and we stop all processing
189 * as quickly as possible in order to prevent propagation of the failure.
190 */
191static irqreturn_t
192ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
193{
194 struct rtas_error_log *rtas_elog;
195 int status = 0xdeadbeef;
196 int fatal;
197
198 spin_lock(&ras_log_buf_lock);
199
200 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
201 RAS_VECTOR_OFFSET,
202 virt_irq_to_real(irq_offset_down(irq)),
203 RTAS_INTERNAL_ERROR, 1 /*Time Critical */,
204 __pa(&ras_log_buf),
205 rtas_get_error_log_max());
206
207 rtas_elog = (struct rtas_error_log *)ras_log_buf;
208
209 if ((status == 0) && (rtas_elog->severity >= RTAS_SEVERITY_ERROR_SYNC))
210 fatal = 1;
211 else
212 fatal = 0;
213
214 /* format and print the extended information */
215 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
216
217 if (fatal) {
218 udbg_printf("Fatal HW Error <0x%lx 0x%x>\n",
219 *((unsigned long *)&ras_log_buf), status);
220 printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
221 *((unsigned long *)&ras_log_buf), status);
222
223#ifndef DEBUG
224 /* Don't actually power off when debugging so we can test
225 * without actually failing while injecting errors.
226 * Error data will not be logged to syslog.
227 */
228 ppc_md.power_off();
229#endif
230 } else {
231 udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n",
232 *((unsigned long *)&ras_log_buf), status);
233 printk(KERN_WARNING
234 "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
235 *((unsigned long *)&ras_log_buf), status);
236 }
237
238 spin_unlock(&ras_log_buf_lock);
239 return IRQ_HANDLED;
240}
241
242/* Get the error information for errors coming through the
243 * FWNMI vectors. The pt_regs' r3 will be updated to reflect
244 * the actual r3 if possible, and a ptr to the error log entry
245 * will be returned if found.
246 *
247 * The mce_data_buf does not have any locks or protection around it,
248 * if a second machine check comes in, or a system reset is done
249 * before we have logged the error, then we will get corruption in the
250 * error log. This is preferable to holding off on calling
251 * ibm,nmi-interlock which would result in us checkstopping if a
252 * second machine check did come in.
253 */
254static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
255{
256 unsigned long errdata = regs->gpr[3];
257 struct rtas_error_log *errhdr = NULL;
258 unsigned long *savep;
259
260 if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
261 (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
262 savep = __va(errdata);
263 regs->gpr[3] = savep[0]; /* restore original r3 */
264 memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
265 memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
266 errhdr = (struct rtas_error_log *)mce_data_buf;
267 } else {
268 printk("FWNMI: corrupt r3\n");
269 }
270 return errhdr;
271}
272
273/* Call this when done with the data returned by FWNMI_get_errinfo.
274 * It will release the saved data area for other CPUs in the
275 * partition to receive FWNMI errors.
276 */
277static void fwnmi_release_errinfo(void)
278{
279 int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
280 if (ret != 0)
281 printk("FWNMI: nmi-interlock failed: %d\n", ret);
282}
283
284void pSeries_system_reset_exception(struct pt_regs *regs)
285{
286 if (fwnmi_active) {
287 struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
288 if (errhdr) {
289 /* XXX Should look at FWNMI information */
290 }
291 fwnmi_release_errinfo();
292 }
293}
294
295/*
296 * See if we can recover from a machine check exception.
297 * This is only called on power4 (or above) and only via
298 * the Firmware Non-Maskable Interrupts (fwnmi) handler
299 * which provides the error analysis for us.
300 *
301 * Return 1 if corrected (or delivered a signal).
302 * Return 0 if there is nothing we can do.
303 */
304static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
305{
306 int nonfatal = 0;
307
308 if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
309 /* Platform corrected itself */
310 nonfatal = 1;
311 } else if ((regs->msr & MSR_RI) &&
312 user_mode(regs) &&
313 err->severity == RTAS_SEVERITY_ERROR_SYNC &&
314 err->disposition == RTAS_DISP_NOT_RECOVERED &&
315 err->target == RTAS_TARGET_MEMORY &&
316 err->type == RTAS_TYPE_ECC_UNCORR &&
317 !(current->pid == 0 || current->pid == 1)) {
318 /* Kill off a user process with an ECC error */
319 printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
320 current->pid);
321 /* XXX something better for ECC error? */
322 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
323 nonfatal = 1;
324 }
325
326 log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
327
328 return nonfatal;
329}
330
331/*
332 * Handle a machine check.
333 *
334 * Note that on Power 4 and beyond Firmware Non-Maskable Interrupts (fwnmi)
335 * should be present. If so the handler which called us tells us if the
336 * error was recovered (never true if RI=0).
337 *
338 * On hardware prior to Power 4 these exceptions were asynchronous which
339 * means we can't tell exactly where they occurred and so we can't recover.
340 */
341int pSeries_machine_check_exception(struct pt_regs *regs)
342{
343 struct rtas_error_log *errp;
344
345 if (fwnmi_active) {
346 errp = fwnmi_get_errinfo(regs);
347 fwnmi_release_errinfo();
348 if (errp && recover_mce(regs, errp))
349 return 1;
350 }
351
352 return 0;
353}
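
The r3 sanity check in fwnmi_get_errinfo() above is easy to misread, so here
it is condensed into a standalone predicate; rtas_base and rtas_size stand
in for rtas.base and rtas.size, and the test values in main() are invented.
The saved register area must sit either in the fixed low-memory window or
inside the RTAS region, otherwise the pointer is treated as corrupt.

#include <stdio.h>

static int errdata_looks_valid(unsigned long errdata,
			       unsigned long rtas_base,
			       unsigned long rtas_size)
{
	if (errdata >= 0x7000 && errdata < 0x7fff0)
		return 1;	/* fixed low-memory FWNMI save area */
	if (errdata >= rtas_base && errdata < rtas_base + rtas_size - 16)
		return 1;	/* inside the RTAS region */
	return 0;		/* treat the pointer as corrupt */
}

int main(void)
{
	printf("%d %d\n",
	       errdata_looks_valid(0x7100, 0x2000000, 0x10000),
	       errdata_looks_valid(0x123, 0x2000000, 0x10000));
	return 0;
}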
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 1f3ff860fdf0..5bdd5b079d96 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/bitops.h>
+#include <linux/rtc.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
diff --git a/arch/ppc64/kernel/rtas.c b/arch/ppc64/kernel/rtas.c
deleted file mode 100644
index 5e8eb33b8e54..000000000000
--- a/arch/ppc64/kernel/rtas.c
+++ /dev/null
@@ -1,774 +0,0 @@
1/*
2 *
3 * Procedures for interfacing to the RTAS on CHRP machines.
4 *
5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <stdarg.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/prom.h>
22#include <asm/rtas.h>
23#include <asm/semaphore.h>
24#include <asm/machdep.h>
25#include <asm/page.h>
26#include <asm/param.h>
27#include <asm/system.h>
28#include <asm/abs_addr.h>
29#include <asm/udbg.h>
30#include <asm/delay.h>
31#include <asm/uaccess.h>
32#include <asm/systemcfg.h>
33
34struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
35
36struct rtas_t rtas = {
37 .lock = SPIN_LOCK_UNLOCKED
38};
39
40EXPORT_SYMBOL(rtas);
41
42char rtas_err_buf[RTAS_ERROR_LOG_MAX];
43
44DEFINE_SPINLOCK(rtas_data_buf_lock);
45char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned;
46unsigned long rtas_rmo_buf;
47
48void
49call_rtas_display_status(unsigned char c)
50{
51 struct rtas_args *args = &rtas.args;
52 unsigned long s;
53
54 if (!rtas.base)
55 return;
56 spin_lock_irqsave(&rtas.lock, s);
57
58 args->token = 10;
59 args->nargs = 1;
60 args->nret = 1;
61 args->rets = (rtas_arg_t *)&(args->args[1]);
62 args->args[0] = (int)c;
63
64 enter_rtas(__pa(args));
65
66 spin_unlock_irqrestore(&rtas.lock, s);
67}
68
69void
70call_rtas_display_status_delay(unsigned char c)
71{
72 static int pending_newline = 0; /* did last write end with unprinted newline? */
73 static int width = 16;
74
75 if (c == '\n') {
76 while (width-- > 0)
77 call_rtas_display_status(' ');
78 width = 16;
79 udelay(500000);
80 pending_newline = 1;
81 } else {
82 if (pending_newline) {
83 call_rtas_display_status('\r');
84 call_rtas_display_status('\n');
85 }
86 pending_newline = 0;
87 if (width--) {
88 call_rtas_display_status(c);
89 udelay(10000);
90 }
91 }
92}
93
94void
95rtas_progress(char *s, unsigned short hex)
96{
97 struct device_node *root;
98 int width, *p;
99 char *os;
100 static int display_character, set_indicator;
101 static int display_width, display_lines, *row_width, form_feed;
102 static DEFINE_SPINLOCK(progress_lock);
103 static int current_line;
104 static int pending_newline = 0; /* did last write end with unprinted newline? */
105
106 if (!rtas.base)
107 return;
108
109 if (display_width == 0) {
110 display_width = 0x10;
111 if ((root = find_path_device("/rtas"))) {
112 if ((p = (unsigned int *)get_property(root,
113 "ibm,display-line-length", NULL)))
114 display_width = *p;
115 if ((p = (unsigned int *)get_property(root,
116 "ibm,form-feed", NULL)))
117 form_feed = *p;
118 if ((p = (unsigned int *)get_property(root,
119 "ibm,display-number-of-lines", NULL)))
120 display_lines = *p;
121 row_width = (unsigned int *)get_property(root,
122 "ibm,display-truncation-length", NULL);
123 }
124 display_character = rtas_token("display-character");
125 set_indicator = rtas_token("set-indicator");
126 }
127
128 if (display_character == RTAS_UNKNOWN_SERVICE) {
129 /* use hex display if available */
130 if (set_indicator != RTAS_UNKNOWN_SERVICE)
131 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
132 return;
133 }
134
135 spin_lock(&progress_lock);
136
137 /*
138 * Last write ended with newline, but we didn't print it since
139 * it would just clear the bottom line of output. Print it now
140 * instead.
141 *
142 * If no newline is pending and form feed is supported, clear the
143 * display with a form feed; otherwise, print a CR to start output
144 * at the beginning of the line.
145 */
146 if (pending_newline) {
147 rtas_call(display_character, 1, 1, NULL, '\r');
148 rtas_call(display_character, 1, 1, NULL, '\n');
149 pending_newline = 0;
150 } else {
151 current_line = 0;
152 if (form_feed)
153 rtas_call(display_character, 1, 1, NULL,
154 (char)form_feed);
155 else
156 rtas_call(display_character, 1, 1, NULL, '\r');
157 }
158
159 if (row_width)
160 width = row_width[current_line];
161 else
162 width = display_width;
163 os = s;
164 while (*os) {
165 if (*os == '\n' || *os == '\r') {
166 /* If newline is the last character, save it
167 * until next call to avoid bumping up the
168 * display output.
169 */
170 if (*os == '\n' && !os[1]) {
171 pending_newline = 1;
172 current_line++;
173 if (current_line > display_lines-1)
174 current_line = display_lines-1;
175 spin_unlock(&progress_lock);
176 return;
177 }
178
179 /* RTAS wants CR-LF, not just LF */
180
181 if (*os == '\n') {
182 rtas_call(display_character, 1, 1, NULL, '\r');
183 rtas_call(display_character, 1, 1, NULL, '\n');
184 } else {
185 /* CR might be used to re-draw a line, so we'll
186 * leave it alone and not add LF.
187 */
188 rtas_call(display_character, 1, 1, NULL, *os);
189 }
190
191 if (row_width)
192 width = row_width[current_line];
193 else
194 width = display_width;
195 } else {
196 width--;
197 rtas_call(display_character, 1, 1, NULL, *os);
198 }
199
200 os++;
201
202	/* if we have overrun the line width, skip ahead to the next line break */
203 if (width <= 0)
204 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
205 os++;
206 }
207
208 spin_unlock(&progress_lock);
209}
210
211int
212rtas_token(const char *service)
213{
214 int *tokp;
215 if (rtas.dev == NULL) {
216 PPCDBG(PPCDBG_RTAS,"\tNo rtas device in device-tree...\n");
217 return RTAS_UNKNOWN_SERVICE;
218 }
219 tokp = (int *) get_property(rtas.dev, service, NULL);
220 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
221}
222
223/*
224 * Return the firmware-specified size of the error log buffer
225 * for all rtas calls that require an error buffer argument.
226 * This includes 'check-exception' and 'rtas-last-error'.
227 */
228int rtas_get_error_log_max(void)
229{
230 static int rtas_error_log_max;
231 if (rtas_error_log_max)
232 return rtas_error_log_max;
233
234 rtas_error_log_max = rtas_token ("rtas-error-log-max");
235 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
236 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
237 printk (KERN_WARNING "RTAS: bad log buffer size %d\n", rtas_error_log_max);
238 rtas_error_log_max = RTAS_ERROR_LOG_MAX;
239 }
240 return rtas_error_log_max;
241}
242
243
244/** Return a copy of the detailed error text associated with the
245 * most recent failed call to rtas. Because the error text
246 * might go stale if there are any other intervening rtas calls,
247 * this routine must be called atomically with whatever produced
248 * the error (i.e. with rtas.lock still held from the previous call).
249 */
250static int
251__fetch_rtas_last_error(void)
252{
253 struct rtas_args err_args, save_args;
254 u32 bufsz;
255
256 bufsz = rtas_get_error_log_max();
257
258 err_args.token = rtas_token("rtas-last-error");
259 err_args.nargs = 2;
260 err_args.nret = 1;
261
262 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
263 err_args.args[1] = bufsz;
264 err_args.args[2] = 0;
265
266 save_args = rtas.args;
267 rtas.args = err_args;
268
269 enter_rtas(__pa(&rtas.args));
270
271 err_args = rtas.args;
272 rtas.args = save_args;
273
274 return err_args.args[2];
275}
276
277int rtas_call(int token, int nargs, int nret, int *outputs, ...)
278{
279 va_list list;
280 int i, logit = 0;
281 unsigned long s;
282 struct rtas_args *rtas_args;
283 char * buff_copy = NULL;
284 int ret;
285
286 PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
287 PPCDBG(PPCDBG_RTAS, "\ttoken = 0x%x\n", token);
288 PPCDBG(PPCDBG_RTAS, "\tnargs = %d\n", nargs);
289 PPCDBG(PPCDBG_RTAS, "\tnret = %d\n", nret);
290 PPCDBG(PPCDBG_RTAS, "\t&outputs = 0x%lx\n", outputs);
291 if (token == RTAS_UNKNOWN_SERVICE)
292 return -1;
293
294 /* Gotta do something different here, use global lock for now... */
295 spin_lock_irqsave(&rtas.lock, s);
296 rtas_args = &rtas.args;
297
298 rtas_args->token = token;
299 rtas_args->nargs = nargs;
300 rtas_args->nret = nret;
301 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]);
302 va_start(list, outputs);
303 for (i = 0; i < nargs; ++i) {
304 rtas_args->args[i] = va_arg(list, rtas_arg_t);
305 PPCDBG(PPCDBG_RTAS, "\tnarg[%d] = 0x%x\n", i, rtas_args->args[i]);
306 }
307 va_end(list);
308
309 for (i = 0; i < nret; ++i)
310 rtas_args->rets[i] = 0;
311
312 PPCDBG(PPCDBG_RTAS, "\tentering rtas with 0x%lx\n",
313 __pa(rtas_args));
314 enter_rtas(__pa(rtas_args));
315 PPCDBG(PPCDBG_RTAS, "\treturned from rtas ...\n");
316
317 /* A -1 return code indicates that the last command couldn't
318 be completed due to a hardware error. */
319 if (rtas_args->rets[0] == -1)
320 logit = (__fetch_rtas_last_error() == 0);
321
322 ifppcdebug(PPCDBG_RTAS) {
323 for(i=0; i < nret ;i++)
324 udbg_printf("\tnret[%d] = 0x%lx\n", i, (ulong)rtas_args->rets[i]);
325 }
326
327 if (nret > 1 && outputs != NULL)
328 for (i = 0; i < nret-1; ++i)
329 outputs[i] = rtas_args->rets[i+1];
330 ret = (nret > 0)? rtas_args->rets[0]: 0;
331
332 /* Log the error in the unlikely case that there was one. */
333 if (unlikely(logit)) {
334 buff_copy = rtas_err_buf;
335 if (mem_init_done) {
336 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
337 if (buff_copy)
338 memcpy(buff_copy, rtas_err_buf,
339 RTAS_ERROR_LOG_MAX);
340 }
341 }
342
343 /* Gotta do something different here, use global lock for now... */
344 spin_unlock_irqrestore(&rtas.lock, s);
345
346 if (buff_copy) {
347 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
348 if (mem_init_done)
349 kfree(buff_copy);
350 }
351 return ret;
352}
353
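An editor's sketch of the calling convention used throughout this file: the inputs follow the `outputs` pointer in the vararg list, `rets[0]` becomes the returned status, and `rets[1..]` are copied back into `outputs`. The helper name below is hypothetical; the "get-time-of-day" token and its 8-word return layout follow the usage elsewhere in this tree:

/* Hypothetical sketch, not part of this file: read the RTAS clock.
 * "get-time-of-day" takes no inputs and returns 8 words:
 * status, year, month, day, hour, minute, second, nanoseconds.
 */
static int example_read_rtas_clock(void)
{
	int ret[7];	/* receives rets[1..7]; rets[0] is the status */
	int token = rtas_token("get-time-of-day");
	int status;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	status = rtas_call(token, 0, 8, ret);
	if (status < 0)
		return rtas_error_rc(status);

	printk("RTAS date: %04d-%02d-%02d\n", ret[0], ret[1], ret[2]);
	return 0;
}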
354/* Given an RTAS status code of 990n compute the hinted delay of 10^n
355 * (last digit) milliseconds. For now we bound at n=5 (100 sec).
356 */
357unsigned int
358rtas_extended_busy_delay_time(int status)
359{
360 int order = status - 9900;
361 unsigned long ms;
362
363 if (order < 0)
364 order = 0; /* RTC depends on this for -2 clock busy */
365 else if (order > 5)
366 order = 5; /* bound */
367
368 /* Use microseconds for reasonable accuracy */
369 for (ms=1; order > 0; order--)
370 ms *= 10;
371
372 return ms;
373}
374
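A worked example of the 990n encoding: status 9903 gives order = 9903 - 9900 = 3, hence a hinted delay of 10^3 = 1000 ms. A minimal sketch of a caller honouring the hint (hypothetical helper name):

static void example_honour_busy_hint(int status)
{
	/* e.g. status 9903 -> order 3 -> 10^3 = 1000 ms */
	if (rtas_is_extended_busy(status)) {
		unsigned int ms = rtas_extended_busy_delay_time(status);
		udelay(ms * 1000);	/* the helpers below busy-wait the same way */
	}
}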
375int rtas_error_rc(int rtas_rc)
376{
377 int rc;
378
379 switch (rtas_rc) {
380 case -1: /* Hardware Error */
381 rc = -EIO;
382 break;
383 case -3: /* Bad indicator/domain/etc */
384 rc = -EINVAL;
385 break;
386 case -9000: /* Isolation error */
387 rc = -EFAULT;
388 break;
389 case -9001: /* Outstanding TCE/PTE */
390 rc = -EEXIST;
391 break;
392 case -9002: /* No usable slot */
393 rc = -ENODEV;
394 break;
395 default:
396 printk(KERN_ERR "%s: unexpected RTAS error %d\n",
397 __FUNCTION__, rtas_rc);
398 rc = -ERANGE;
399 break;
400 }
401 return rc;
402}
403
404int rtas_get_power_level(int powerdomain, int *level)
405{
406 int token = rtas_token("get-power-level");
407 int rc;
408
409 if (token == RTAS_UNKNOWN_SERVICE)
410 return -ENOENT;
411
412 while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
413 udelay(1);
414
415 if (rc < 0)
416 return rtas_error_rc(rc);
417 return rc;
418}
419
420int rtas_set_power_level(int powerdomain, int level, int *setlevel)
421{
422 int token = rtas_token("set-power-level");
423 unsigned int wait_time;
424 int rc;
425
426 if (token == RTAS_UNKNOWN_SERVICE)
427 return -ENOENT;
428
429 while (1) {
430 rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
431 if (rc == RTAS_BUSY)
432 udelay(1);
433 else if (rtas_is_extended_busy(rc)) {
434 wait_time = rtas_extended_busy_delay_time(rc);
435 udelay(wait_time * 1000);
436 } else
437 break;
438 }
439
440 if (rc < 0)
441 return rtas_error_rc(rc);
442 return rc;
443}
444
445int rtas_get_sensor(int sensor, int index, int *state)
446{
447 int token = rtas_token("get-sensor-state");
448 unsigned int wait_time;
449 int rc;
450
451 if (token == RTAS_UNKNOWN_SERVICE)
452 return -ENOENT;
453
454 while (1) {
455 rc = rtas_call(token, 2, 2, state, sensor, index);
456 if (rc == RTAS_BUSY)
457 udelay(1);
458 else if (rtas_is_extended_busy(rc)) {
459 wait_time = rtas_extended_busy_delay_time(rc);
460 udelay(wait_time * 1000);
461 } else
462 break;
463 }
464
465 if (rc < 0)
466 return rtas_error_rc(rc);
467 return rc;
468}
469
470int rtas_set_indicator(int indicator, int index, int new_value)
471{
472 int token = rtas_token("set-indicator");
473 unsigned int wait_time;
474 int rc;
475
476 if (token == RTAS_UNKNOWN_SERVICE)
477 return -ENOENT;
478
479 while (1) {
480 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
481 if (rc == RTAS_BUSY)
482 udelay(1);
483 else if (rtas_is_extended_busy(rc)) {
484 wait_time = rtas_extended_busy_delay_time(rc);
485 udelay(wait_time * 1000);
486 }
487 else
488 break;
489 }
490
491 if (rc < 0)
492 return rtas_error_rc(rc);
493 return rc;
494}
495
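The get/set helpers above all repeat the same busy-retry protocol. Factored into one wrapper purely for illustration (a sketch, not code from this file; rtas_call() only consumes nargs of the trailing arguments, so always passing three is harmless):

/* Hypothetical sketch of the shared retry shape: spin 1us on
 * RTAS_BUSY and honour the extended-busy (990x) delay hint.
 */
static int example_rtas_call_retry(int token, int nargs, int nret,
				   int *outputs, int a0, int a1, int a2)
{
	int rc;

	while (1) {
		rc = rtas_call(token, nargs, nret, outputs, a0, a1, a2);
		if (rc == RTAS_BUSY)
			udelay(1);
		else if (rtas_is_extended_busy(rc))
			udelay(rtas_extended_busy_delay_time(rc) * 1000);
		else
			break;
	}
	return rc;
}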
496#define FLASH_BLOCK_LIST_VERSION (1UL)
497static void
498rtas_flash_firmware(void)
499{
500 unsigned long image_size;
501 struct flash_block_list *f, *next, *flist;
502 unsigned long rtas_block_list;
503 int i, status, update_token;
504
505 update_token = rtas_token("ibm,update-flash-64-and-reboot");
506 if (update_token == RTAS_UNKNOWN_SERVICE) {
507 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
508 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
509 return;
510 }
511
512 /* NOTE: the "first" block list is a global var with no data
513 * blocks in the kernel data segment. We do this because
514 * we want to ensure this block_list addr is under 4GB.
515 */
516 rtas_firmware_flash_list.num_blocks = 0;
517 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
518 rtas_block_list = virt_to_abs(flist);
519 if (rtas_block_list >= 4UL*1024*1024*1024) {
520 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
521 return;
522 }
523
524 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
525 /* Update the block_list in place. */
526 image_size = 0;
527 for (f = flist; f; f = next) {
528 /* Translate data addrs to absolute */
529 for (i = 0; i < f->num_blocks; i++) {
530 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
531 image_size += f->blocks[i].length;
532 }
533 next = f->next;
534 /* Don't translate NULL pointer for last entry */
535 if (f->next)
536 f->next = (struct flash_block_list *)virt_to_abs(f->next);
537 else
538 f->next = NULL;
539 /* make num_blocks into the version/length field */
540 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
541 }
542
543 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
544 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
545 rtas_progress("Flashing \n", 0x0);
546 rtas_progress("Please Wait... ", 0x0);
547 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
548 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
549 switch (status) { /* should only get "bad" status */
550 case 0:
551 printk(KERN_ALERT "FLASH: success\n");
552 break;
553 case -1:
554	printk(KERN_ALERT "FLASH: hardware error. Firmware may not have been flashed\n");
555 break;
556 case -3:
557 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
558 break;
559 case -4:
560 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
561 break;
562 default:
563 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
564 break;
565 }
566}
567
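The num_blocks rewrite in rtas_flash_firmware() packs two fields into one 64-bit word: the list version in the top byte, and below it the byte length of the header plus its block descriptors (16 bytes each, plus 16 for the header itself). A decoding sketch under that assumption (hypothetical helper name):

/* e.g. 2 data blocks: (1UL << 56) | ((2 + 1) * 16)
 * -> version 1, 48 bytes covered by this list segment.
 */
static void example_decode_block_list_word(unsigned long word)
{
	unsigned long version = word >> 56;
	unsigned long length = word & ((1UL << 56) - 1);

	printk("flash list segment: version %lu, %lu bytes\n",
	       version, length);
}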
568void rtas_flash_bypass_warning(void)
569{
570 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
571 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
572}
573
574
575void
576rtas_restart(char *cmd)
577{
578 if (rtas_firmware_flash_list.next)
579 rtas_flash_firmware();
580
581 printk("RTAS system-reboot returned %d\n",
582 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
583 for (;;);
584}
585
586void
587rtas_power_off(void)
588{
589 if (rtas_firmware_flash_list.next)
590 rtas_flash_bypass_warning();
591 /* allow power on only with power button press */
592 printk("RTAS power-off returned %d\n",
593 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
594 for (;;);
595}
596
597void
598rtas_halt(void)
599{
600 if (rtas_firmware_flash_list.next)
601 rtas_flash_bypass_warning();
602 rtas_power_off();
603}
604
605/* Must be in the RMO region, so we place it here */
606static char rtas_os_term_buf[2048];
607
608void rtas_os_term(char *str)
609{
610 int status;
611
612 if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term"))
613 return;
614
615 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
616
617 do {
618 status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
619 __pa(rtas_os_term_buf));
620
621 if (status == RTAS_BUSY)
622 udelay(1);
623 else if (status != 0)
624 printk(KERN_EMERG "ibm,os-term call failed %d\n",
625 status);
626 } while (status == RTAS_BUSY);
627}
628
629
630asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
631{
632 struct rtas_args args;
633 unsigned long flags;
634 char * buff_copy;
635 int nargs;
636 int err_rc = 0;
637
638 if (!capable(CAP_SYS_ADMIN))
639 return -EPERM;
640
641 if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
642 return -EFAULT;
643
644 nargs = args.nargs;
645 if (nargs > ARRAY_SIZE(args.args)
646 || args.nret > ARRAY_SIZE(args.args)
647 || nargs + args.nret > ARRAY_SIZE(args.args))
648 return -EINVAL;
649
650 /* Copy in args. */
651 if (copy_from_user(args.args, uargs->args,
652 nargs * sizeof(rtas_arg_t)) != 0)
653 return -EFAULT;
654
655 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL);
656
657 spin_lock_irqsave(&rtas.lock, flags);
658
659 rtas.args = args;
660 enter_rtas(__pa(&rtas.args));
661 args = rtas.args;
662
663 args.rets = &args.args[nargs];
664
665 /* A -1 return code indicates that the last command couldn't
666 be completed due to a hardware error. */
667 if (args.rets[0] == -1) {
668 err_rc = __fetch_rtas_last_error();
669 if ((err_rc == 0) && buff_copy) {
670 memcpy(buff_copy, rtas_err_buf, RTAS_ERROR_LOG_MAX);
671 }
672 }
673
674 spin_unlock_irqrestore(&rtas.lock, flags);
675
676 if (buff_copy) {
677 if ((args.rets[0] == -1) && (err_rc == 0)) {
678 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
679 }
680 kfree(buff_copy);
681 }
682
683 /* Copy out args. */
684 if (copy_to_user(uargs->args + nargs,
685 args.args + nargs,
686 args.nret * sizeof(rtas_arg_t)) != 0)
687 return -EFAULT;
688
689 return 0;
690}
691
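ppc_rtas() above is the user-space entry point: callers fill token/nargs/nret and the inputs, and read the returns back starting at args[nargs]. A hedged user-space sketch; the syscall number and the mirrored struct layout are assumptions for illustration, not taken from this file:

/* Hypothetical user-space caller for the ppc_rtas() syscall. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

struct example_rtas_args {		/* assumed mirror of struct rtas_args */
	unsigned int token, nargs, nret;
	unsigned int args[16];
	unsigned int *rets;
};

int example_user_rtas(unsigned int token)	/* token looked up beforehand */
{
	struct example_rtas_args a;

	memset(&a, 0, sizeof(a));
	a.token = token;
	a.nargs = 0;
	a.nret = 1;
	if (syscall(255 /* assumed __NR_rtas */, &a) != 0)
		return -1;
	return a.args[0];	/* returns start at args[nargs] */
}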
692/* This version can't take the spinlock, because it never returns */
693
694struct rtas_args rtas_stop_self_args = {
695 /* The token is initialized for real in setup_system() */
696 .token = RTAS_UNKNOWN_SERVICE,
697 .nargs = 0,
698 .nret = 1,
699 .rets = &rtas_stop_self_args.args[0],
700};
701
702void rtas_stop_self(void)
703{
704 struct rtas_args *rtas_args = &rtas_stop_self_args;
705
706 local_irq_disable();
707
708 BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
709
710 printk("cpu %u (hwid %u) Ready to die...\n",
711 smp_processor_id(), hard_smp_processor_id());
712 enter_rtas(__pa(rtas_args));
713
714 panic("Alas, I survived.\n");
715}
716
717/*
718 * Called early during boot, before mem init or bootmem, to retrieve the RTAS
719 * information from the device-tree and allocate the RMO buffer for userland
720 * accesses.
721 */
722void __init rtas_initialize(void)
723{
724	/* Get the RTAS device node and fill in our "rtas" structure with
725	 * information about it.
726 */
727 rtas.dev = of_find_node_by_name(NULL, "rtas");
728 if (rtas.dev) {
729 u32 *basep, *entryp;
730 u32 *sizep;
731
732 basep = (u32 *)get_property(rtas.dev, "linux,rtas-base", NULL);
733 sizep = (u32 *)get_property(rtas.dev, "rtas-size", NULL);
734 if (basep != NULL && sizep != NULL) {
735 rtas.base = *basep;
736 rtas.size = *sizep;
737 entryp = (u32 *)get_property(rtas.dev, "linux,rtas-entry", NULL);
738 if (entryp == NULL) /* Ugh */
739 rtas.entry = rtas.base;
740 else
741 rtas.entry = *entryp;
742 } else
743 rtas.dev = NULL;
744 }
745 /* If RTAS was found, allocate the RMO buffer for it and look for
746 * the stop-self token if any
747 */
748 if (rtas.dev) {
749 unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
750 if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
751 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
752
753 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE,
754 rtas_region);
755
756#ifdef CONFIG_HOTPLUG_CPU
757 rtas_stop_self_args.token = rtas_token("stop-self");
758#endif /* CONFIG_HOTPLUG_CPU */
759 }
760
761}
762
763
764EXPORT_SYMBOL(rtas_firmware_flash_list);
765EXPORT_SYMBOL(rtas_token);
766EXPORT_SYMBOL(rtas_call);
767EXPORT_SYMBOL(rtas_data_buf);
768EXPORT_SYMBOL(rtas_data_buf_lock);
769EXPORT_SYMBOL(rtas_extended_busy_delay_time);
770EXPORT_SYMBOL(rtas_get_sensor);
771EXPORT_SYMBOL(rtas_get_power_level);
772EXPORT_SYMBOL(rtas_set_power_level);
773EXPORT_SYMBOL(rtas_set_indicator);
774EXPORT_SYMBOL(rtas_get_error_log_max);
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4a9719b48abe..3ad15c90fbbd 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -38,9 +38,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
 #include <asm/rtas.h>
-
-#include "mpic.h"
-#include "pci.h"
+#include <asm/mpic.h>
+#include <asm/ppc-pci.h>
 
 /* RTAS tokens */
 static int read_pci_config;
@@ -401,7 +400,7 @@ unsigned long __init find_and_init_phbs(void)
 		if (!phb)
 			continue;
 
-		pci_process_bridge_OF_ranges(phb, node);
+		pci_process_bridge_OF_ranges(phb, node, 0);
 		pci_setup_phb_io(phb, index == 0);
 #ifdef CONFIG_PPC_PSERIES
 		if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
@@ -451,7 +450,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
 	if (!phb)
 		return NULL;
 
-	pci_process_bridge_OF_ranges(phb, dn);
+	pci_process_bridge_OF_ranges(phb, dn, primary);
 
 	pci_setup_phb_io_dynamic(phb, primary);
 	of_node_put(root);
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc61325..79e7ed2858dd 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
 #include <asm/time.h>
 #include <asm/rtas.h>
 
-#include <asm/iSeries/mf.h>
 #include <asm/machdep.h>
 
-extern int piranha_simulator;
-
 /*
  * We sponge a minor off of the misc major. No need slurping
  * up another valuable major dev number for this. If you add
@@ -265,44 +262,10 @@ static int rtc_read_proc(char *page, char **start, off_t off,
 	return len;
 }
 
-#ifdef CONFIG_PPC_ISERIES
-/*
- * Get the RTC from the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
-{
-	if (piranha_simulator)
-		return;
-
-	mf_get_rtc(rtc_tm);
-	rtc_tm->tm_mon--;
-}
-
-/*
- * Set the RTC in the virtual service processor
- * This requires flowing LpEvents to the primary partition
- */
-int iSeries_set_rtc_time(struct rtc_time *tm)
-{
-	mf_set_rtc(tm);
-	return 0;
-}
-
-void iSeries_get_boot_time(struct rtc_time *tm)
-{
-	if ( piranha_simulator )
-		return;
-
-	mf_get_boot_rtc(tm);
-	tm->tm_mon -= 1;
-}
-#endif
-
 #ifdef CONFIG_PPC_RTAS
 #define MAX_RTC_WAIT 5000	/* 5 sec */
 #define RTAS_CLOCK_BUSY (-2)
-void rtas_get_boot_time(struct rtc_time *rtc_tm)
+unsigned long rtas_get_boot_time(void)
 {
 	int ret[8];
 	int error, wait_time;
@@ -322,15 +285,10 @@ void rtas_get_boot_time(struct rtc_time *rtc_tm)
 	if (error != 0 && printk_ratelimit()) {
 		printk(KERN_WARNING "error: reading the clock failed (%d)\n",
 			error);
-		return;
+		return 0;
 	}
 
-	rtc_tm->tm_sec = ret[5];
-	rtc_tm->tm_min = ret[4];
-	rtc_tm->tm_hour = ret[3];
-	rtc_tm->tm_mday = ret[2];
-	rtc_tm->tm_mon = ret[1] - 1;
-	rtc_tm->tm_year = ret[0] - 1900;
+	return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
 }
 
 /* NOTE: get_rtc_time will get an error if executed in interrupt context
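The new return convention above hands back a bare Unix timestamp instead of filling a struct rtc_time. Kernel mktime() takes (year, mon, day, hour, min, sec) with a 1-based month and a full year, which matches the layout RTAS returns in ret[]. A quick sketch of the same conversion with fixed values (hypothetical helper name):

/* Sketch: the conversion rtas_get_boot_time() now performs,
 * with a fixed example date of 2005-09-17 12:30:00 UTC.
 */
unsigned long example_boot_stamp(void)
{
	int ret[6] = { 2005, 9, 17, 12, 30, 0 };	/* year..second */

	return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}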
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
deleted file mode 100644
index 5ac48bd64891..000000000000
--- a/arch/ppc64/kernel/setup.c
+++ /dev/null
@@ -1,1316 +0,0 @@
1/*
2 *
3 * Common boot and setup code.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#undef DEBUG
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/string.h>
18#include <linux/sched.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/reboot.h>
22#include <linux/delay.h>
23#include <linux/initrd.h>
24#include <linux/ide.h>
25#include <linux/seq_file.h>
26#include <linux/ioport.h>
27#include <linux/console.h>
28#include <linux/utsname.h>
29#include <linux/tty.h>
30#include <linux/root_dev.h>
31#include <linux/notifier.h>
32#include <linux/cpu.h>
33#include <linux/unistd.h>
34#include <linux/serial.h>
35#include <linux/serial_8250.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/processor.h>
39#include <asm/pgtable.h>
40#include <asm/bootinfo.h>
41#include <asm/smp.h>
42#include <asm/elf.h>
43#include <asm/machdep.h>
44#include <asm/paca.h>
45#include <asm/ppcdebug.h>
46#include <asm/time.h>
47#include <asm/cputable.h>
48#include <asm/sections.h>
49#include <asm/btext.h>
50#include <asm/nvram.h>
51#include <asm/setup.h>
52#include <asm/system.h>
53#include <asm/rtas.h>
54#include <asm/iommu.h>
55#include <asm/serial.h>
56#include <asm/cache.h>
57#include <asm/page.h>
58#include <asm/mmu.h>
59#include <asm/lmb.h>
60#include <asm/iSeries/ItLpNaca.h>
61
62#ifdef DEBUG
63#define DBG(fmt...) udbg_printf(fmt)
64#else
65#define DBG(fmt...)
66#endif
67
68/*
69 * Here are some early debugging facilities. You can enable one
70 * but your kernel will not boot on anything else if you do so
71 */
72
73/* This one is for use on LPAR machines that support an HVC console
74 * on vterm 0
75 */
76extern void udbg_init_debug_lpar(void);
77/* This one is for use on Apple G5 machines
78 */
79extern void udbg_init_pmac_realmode(void);
80/* That's RTAS panel debug */
81extern void call_rtas_display_status_delay(unsigned char c);
82/* Here's maple real mode debug */
83extern void udbg_init_maple_realmode(void);
84
85#define EARLY_DEBUG_INIT() do {} while(0)
86
87#if 0
88#define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
89#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
90#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
91#define EARLY_DEBUG_INIT() \
92 do { udbg_putc = call_rtas_display_status_delay; } while(0)
93#endif
94
95/* extern void *stab; */
96extern unsigned long klimit;
97
98extern void mm_init_ppc64(void);
99extern void stab_initialize(unsigned long stab);
100extern void htab_initialize(void);
101extern void early_init_devtree(void *flat_dt);
102extern void unflatten_device_tree(void);
103
104extern void smp_release_cpus(void);
105
106int have_of = 1;
107int boot_cpuid = 0;
108int boot_cpuid_phys = 0;
109dev_t boot_dev;
110u64 ppc64_pft_size;
111
112struct ppc64_caches ppc64_caches;
113EXPORT_SYMBOL_GPL(ppc64_caches);
114
115/*
116 * These are used in binfmt_elf.c to put aux entries on the stack
117 * for each elf executable being started.
118 */
119int dcache_bsize;
120int icache_bsize;
121int ucache_bsize;
122
123/* The main machine-dep calls structure
124 */
125struct machdep_calls ppc_md;
126EXPORT_SYMBOL(ppc_md);
127
128#ifdef CONFIG_MAGIC_SYSRQ
129unsigned long SYSRQ_KEY;
130#endif /* CONFIG_MAGIC_SYSRQ */
131
132
133static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
134static struct notifier_block ppc64_panic_block = {
135 .notifier_call = ppc64_panic_event,
136 .priority = INT_MIN /* may not return; must be done last */
137};
138
139/*
140 * Perhaps we can put the pmac screen_info[] here
141 * on pmac as well so we don't need the ifdef's.
142 * Until we get multiple-console support in here
143 * that is. -- Cort
144 * Maybe tie it to serial consoles, since this is really what
145 * these processors use on existing boards. -- Dan
146 */
147struct screen_info screen_info = {
148 .orig_x = 0,
149 .orig_y = 25,
150 .orig_video_cols = 80,
151 .orig_video_lines = 25,
152 .orig_video_isVGA = 1,
153 .orig_video_points = 16
154};
155
156#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
157
158static int smt_enabled_cmdline;
159
160/* Look for ibm,smt-enabled OF option */
161static void check_smt_enabled(void)
162{
163 struct device_node *dn;
164 char *smt_option;
165
166 /* Allow the command line to overrule the OF option */
167 if (smt_enabled_cmdline)
168 return;
169
170 dn = of_find_node_by_path("/options");
171
172 if (dn) {
173 smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
174
175 if (smt_option) {
176 if (!strcmp(smt_option, "on"))
177 smt_enabled_at_boot = 1;
178 else if (!strcmp(smt_option, "off"))
179 smt_enabled_at_boot = 0;
180 }
181 }
182}
183
184/* Look for smt-enabled= cmdline option */
185static int __init early_smt_enabled(char *p)
186{
187 smt_enabled_cmdline = 1;
188
189 if (!p)
190 return 0;
191
192 if (!strcmp(p, "on") || !strcmp(p, "1"))
193 smt_enabled_at_boot = 1;
194 else if (!strcmp(p, "off") || !strcmp(p, "0"))
195 smt_enabled_at_boot = 0;
196
197 return 0;
198}
199early_param("smt-enabled", early_smt_enabled);
200
201/**
202 * setup_cpu_maps - initialize the following cpu maps:
203 * cpu_possible_map
204 * cpu_present_map
205 * cpu_sibling_map
206 *
207 * Having the possible map set up early allows us to restrict allocations
208 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
209 *
210 * We do not initialize the online map here; cpus set their own bits in
211 * cpu_online_map as they come up.
212 *
213 * This function is valid only for Open Firmware systems. finish_device_tree
214 * must be called before using this.
215 *
216 * While we're here, we may as well set the "physical" cpu ids in the paca.
217 */
218static void __init setup_cpu_maps(void)
219{
220 struct device_node *dn = NULL;
221 int cpu = 0;
222 int swap_cpuid = 0;
223
224 check_smt_enabled();
225
226 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
227 u32 *intserv;
228 int j, len = sizeof(u32), nthreads;
229
230 intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
231 &len);
232 if (!intserv)
233 intserv = (u32 *)get_property(dn, "reg", NULL);
234
235 nthreads = len / sizeof(u32);
236
237 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
238 cpu_set(cpu, cpu_present_map);
239 set_hard_smp_processor_id(cpu, intserv[j]);
240
241 if (intserv[j] == boot_cpuid_phys)
242 swap_cpuid = cpu;
243 cpu_set(cpu, cpu_possible_map);
244 cpu++;
245 }
246 }
247
248 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
249 * boot cpu is logical 0.
250 */
251 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
252 u32 tmp;
253 tmp = get_hard_smp_processor_id(0);
254 set_hard_smp_processor_id(0, boot_cpuid_phys);
255 set_hard_smp_processor_id(swap_cpuid, tmp);
256 }
257
258 /*
259 * On pSeries LPAR, we need to know how many cpus
260 * could possibly be added to this partition.
261 */
262 if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
263 (dn = of_find_node_by_path("/rtas"))) {
264 int num_addr_cell, num_size_cell, maxcpus;
265 unsigned int *ireg;
266
267 num_addr_cell = prom_n_addr_cells(dn);
268 num_size_cell = prom_n_size_cells(dn);
269
270 ireg = (unsigned int *)
271 get_property(dn, "ibm,lrdr-capacity", NULL);
272
273 if (!ireg)
274 goto out;
275
276 maxcpus = ireg[num_addr_cell + num_size_cell];
277
278 /* Double maxcpus for processors which have SMT capability */
279 if (cpu_has_feature(CPU_FTR_SMT))
280 maxcpus *= 2;
281
282 if (maxcpus > NR_CPUS) {
283 printk(KERN_WARNING
284 "Partition configured for %d cpus, "
285 "operating system maximum is %d.\n",
286 maxcpus, NR_CPUS);
287 maxcpus = NR_CPUS;
288 } else
289 printk(KERN_INFO "Partition configured for %d cpus.\n",
290 maxcpus);
291
292 for (cpu = 0; cpu < maxcpus; cpu++)
293 cpu_set(cpu, cpu_possible_map);
294 out:
295 of_node_put(dn);
296 }
297
298 /*
299 * Do the sibling map; assume only two threads per processor.
300 */
301 for_each_cpu(cpu) {
302 cpu_set(cpu, cpu_sibling_map[cpu]);
303 if (cpu_has_feature(CPU_FTR_SMT))
304 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
305 }
306
307 systemcfg->processorCount = num_present_cpus();
308}
309#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */
310
311
312#ifdef CONFIG_PPC_MULTIPLATFORM
313
314extern struct machdep_calls pSeries_md;
315extern struct machdep_calls pmac_md;
316extern struct machdep_calls maple_md;
317extern struct machdep_calls bpa_md;
318
319/* Ultimately, stuff them in an elf section like initcalls... */
320static struct machdep_calls __initdata *machines[] = {
321#ifdef CONFIG_PPC_PSERIES
322 &pSeries_md,
323#endif /* CONFIG_PPC_PSERIES */
324#ifdef CONFIG_PPC_PMAC
325 &pmac_md,
326#endif /* CONFIG_PPC_PMAC */
327#ifdef CONFIG_PPC_MAPLE
328 &maple_md,
329#endif /* CONFIG_PPC_MAPLE */
330#ifdef CONFIG_PPC_BPA
331 &bpa_md,
332#endif
333 NULL
334};
335
336/*
337 * Early initialization entry point. This is called by head.S
338 * with MMU translation disabled. We rely on the "feature" of
339 * the CPU that ignores the top 2 bits of the address in real
340 * mode so we can access kernel globals normally provided we
341 * only toy with things in the RMO region. From here, we do
342 * some early parsing of the device-tree to set up our LMB
343 * data structures, and allocate & initialize the hash table
344 * and segment tables so we can start running with translation
345 * enabled.
346 *
347 * It is this function which will call the probe() callback of
348 * the various platform types and copy the matching one to the
349 * global ppc_md structure. Your platform can eventually do
350 * some very early initializations from the probe() routine, but
351 * this is not recommended, be very careful as, for example, the
352 * device-tree is not accessible via normal means at this point.
353 */
354
355void __init early_setup(unsigned long dt_ptr)
356{
357 struct paca_struct *lpaca = get_paca();
358 static struct machdep_calls **mach;
359
360 /*
361 * Enable early debugging if any specified (see top of
362 * this file)
363 */
364 EARLY_DEBUG_INIT();
365
366 DBG(" -> early_setup()\n");
367
368 /*
369 * Fill the default DBG level (do we want to keep
370	 * that old mechanism around forever?)
371 */
372 ppcdbg_initialize();
373
374 /*
375 * Do early initializations using the flattened device
376	 * tree, like retrieving the physical memory map or
377	 * calculating/retrieving the hash table size
378 */
379 early_init_devtree(__va(dt_ptr));
380
381 /*
382 * Iterate all ppc_md structures until we find the proper
383 * one for the current machine type
384 */
385 DBG("Probing machine type for platform %x...\n",
386 systemcfg->platform);
387
388 for (mach = machines; *mach; mach++) {
389 if ((*mach)->probe(systemcfg->platform))
390 break;
391 }
392	/* What can we do if we didn't find one? */
393 if (*mach == NULL) {
394 DBG("No suitable machine found !\n");
395 for (;;);
396 }
397 ppc_md = **mach;
398
399 DBG("Found, Initializing memory management...\n");
400
401 /*
402 * Initialize stab / SLB management
403 */
404 stab_initialize(lpaca->stab_real);
405
406 /*
407 * Initialize the MMU Hash table and create the linear mapping
408 * of memory
409 */
410 htab_initialize();
411
412 DBG(" <- early_setup()\n");
413}
414
415
416/*
417 * Initialize some remaining members of the ppc64_caches and systemcfg structures
418 * (at least until we get rid of them completely). This is mostly some
419 * cache information about the CPU that will be used by cache flush
420 * routines and/or provided to userland.
421 */
422static void __init initialize_cache_info(void)
423{
424 struct device_node *np;
425 unsigned long num_cpus = 0;
426
427 DBG(" -> initialize_cache_info()\n");
428
429 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
430 num_cpus += 1;
431
432 /* We're assuming *all* of the CPUs have the same
433 * d-cache and i-cache sizes... -Peter
434 */
435
436 if ( num_cpus == 1 ) {
437 u32 *sizep, *lsizep;
438 u32 size, lsize;
439 const char *dc, *ic;
440
441 /* Then read cache informations */
442 if (systemcfg->platform == PLATFORM_POWERMAC) {
443 dc = "d-cache-block-size";
444 ic = "i-cache-block-size";
445 } else {
446 dc = "d-cache-line-size";
447 ic = "i-cache-line-size";
448 }
449
450 size = 0;
451 lsize = cur_cpu_spec->dcache_bsize;
452 sizep = (u32 *)get_property(np, "d-cache-size", NULL);
453 if (sizep != NULL)
454 size = *sizep;
455 lsizep = (u32 *) get_property(np, dc, NULL);
456 if (lsizep != NULL)
457 lsize = *lsizep;
458 if (sizep == 0 || lsizep == 0)
459 DBG("Argh, can't find dcache properties ! "
460 "sizep: %p, lsizep: %p\n", sizep, lsizep);
461
462 systemcfg->dcache_size = ppc64_caches.dsize = size;
463 systemcfg->dcache_line_size =
464 ppc64_caches.dline_size = lsize;
465 ppc64_caches.log_dline_size = __ilog2(lsize);
466 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
467
468 size = 0;
469 lsize = cur_cpu_spec->icache_bsize;
470 sizep = (u32 *)get_property(np, "i-cache-size", NULL);
471 if (sizep != NULL)
472 size = *sizep;
473 lsizep = (u32 *)get_property(np, ic, NULL);
474 if (lsizep != NULL)
475 lsize = *lsizep;
476 if (sizep == 0 || lsizep == 0)
477 DBG("Argh, can't find icache properties ! "
478 "sizep: %p, lsizep: %p\n", sizep, lsizep);
479
480 systemcfg->icache_size = ppc64_caches.isize = size;
481 systemcfg->icache_line_size =
482 ppc64_caches.iline_size = lsize;
483 ppc64_caches.log_iline_size = __ilog2(lsize);
484 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
485 }
486 }
487
488 /* Add an eye catcher and the systemcfg layout version number */
489 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
490 systemcfg->version.major = SYSTEMCFG_MAJOR;
491 systemcfg->version.minor = SYSTEMCFG_MINOR;
492 systemcfg->processor = mfspr(SPRN_PVR);
493
494 DBG(" <- initialize_cache_info()\n");
495}
496
497static void __init check_for_initrd(void)
498{
499#ifdef CONFIG_BLK_DEV_INITRD
500 u64 *prop;
501
502 DBG(" -> check_for_initrd()\n");
503
504 if (of_chosen) {
505 prop = (u64 *)get_property(of_chosen,
506 "linux,initrd-start", NULL);
507 if (prop != NULL) {
508 initrd_start = (unsigned long)__va(*prop);
509 prop = (u64 *)get_property(of_chosen,
510 "linux,initrd-end", NULL);
511 if (prop != NULL) {
512 initrd_end = (unsigned long)__va(*prop);
513 initrd_below_start_ok = 1;
514 } else
515 initrd_start = 0;
516 }
517 }
518
519 /* If we were passed an initrd, set the ROOT_DEV properly if the values
520 * look sensible. If not, clear initrd reference.
521 */
522 if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
523 initrd_end > initrd_start)
524 ROOT_DEV = Root_RAM0;
525 else
526 initrd_start = initrd_end = 0;
527
528 if (initrd_start)
529 printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
530
531 DBG(" <- check_for_initrd()\n");
532#endif /* CONFIG_BLK_DEV_INITRD */
533}
534
535#endif /* CONFIG_PPC_MULTIPLATFORM */
536
537/*
538 * Do some initial setup of the system. The parameters are those which
539 * were passed in from the bootloader.
540 */
541void __init setup_system(void)
542{
543 DBG(" -> setup_system()\n");
544
545#ifdef CONFIG_PPC_ISERIES
546 /* pSeries systems are identified in prom.c via OF. */
547 if (itLpNaca.xLparInstalled == 1)
548 systemcfg->platform = PLATFORM_ISERIES_LPAR;
549
550 ppc_md.init_early();
551#else /* CONFIG_PPC_ISERIES */
552
553 /*
554 * Unflatten the device-tree passed by prom_init or kexec
555 */
556 unflatten_device_tree();
557
558 /*
559	 * Fill the ppc64_caches & systemcfg structures with information
560	 * retrieved from the device-tree. This must be called before
561	 * finish_device_tree(), since the latter requires some of the
562	 * information filled in here to properly parse the interrupt
563	 * tree.
564	 * It also sets up the cache line sizes, which allows us to call
565 * routines like flush_icache_range (used by the hash init
566 * later on).
567 */
568 initialize_cache_info();
569
570#ifdef CONFIG_PPC_RTAS
571 /*
572 * Initialize RTAS if available
573 */
574 rtas_initialize();
575#endif /* CONFIG_PPC_RTAS */
576
577 /*
578 * Check if we have an initrd provided via the device-tree
579 */
580 check_for_initrd();
581
582 /*
583 * Do some platform specific early initializations, that includes
584 * setting up the hash table pointers. It also sets up some interrupt-mapping
585 * related options that will be used by finish_device_tree()
586 */
587 ppc_md.init_early();
588
589 /*
590 * "Finish" the device-tree, that is do the actual parsing of
591 * some of the properties like the interrupt map
592 */
593 finish_device_tree();
594
595 /*
596 * Initialize xmon
597 */
598#ifdef CONFIG_XMON_DEFAULT
599 xmon_init(1);
600#endif
601 /*
602 * Register early console
603 */
604 register_early_udbg_console();
605
606 /* Save unparsed command line copy for /proc/cmdline */
607 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
608
609 parse_early_param();
610#endif /* !CONFIG_PPC_ISERIES */
611
612#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
613 /*
614 * iSeries has already initialized the cpu maps at this point.
615 */
616 setup_cpu_maps();
617
618 /* Release secondary cpus out of their spinloops at 0x60 now that
619 * we can map physical -> logical CPU ids
620 */
621 smp_release_cpus();
622#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
623
624 printk("Starting Linux PPC64 %s\n", system_utsname.version);
625
626 printk("-----------------------------------------------------\n");
627 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
628 printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
629	printk("ppc64_interrupt_controller = 0x%lx\n", ppc64_interrupt_controller);
630 printk("systemcfg = 0x%p\n", systemcfg);
631 printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
632 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
633 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
634 printk("ppc64_caches.dcache_line_size = 0x%x\n",
635 ppc64_caches.dline_size);
636 printk("ppc64_caches.icache_line_size = 0x%x\n",
637 ppc64_caches.iline_size);
638 printk("htab_address = 0x%p\n", htab_address);
639 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
640 printk("-----------------------------------------------------\n");
641
642 mm_init_ppc64();
643
644 DBG(" <- setup_system()\n");
645}
646
647/* also used by kexec */
648void machine_shutdown(void)
649{
650 if (ppc_md.nvram_sync)
651 ppc_md.nvram_sync();
652}
653
654void machine_restart(char *cmd)
655{
656 machine_shutdown();
657 ppc_md.restart(cmd);
658#ifdef CONFIG_SMP
659 smp_send_stop();
660#endif
661 printk(KERN_EMERG "System Halted, OK to turn off power\n");
662 local_irq_disable();
663 while (1) ;
664}
665
666void machine_power_off(void)
667{
668 machine_shutdown();
669 ppc_md.power_off();
670#ifdef CONFIG_SMP
671 smp_send_stop();
672#endif
673 printk(KERN_EMERG "System Halted, OK to turn off power\n");
674 local_irq_disable();
675 while (1) ;
676}
677/* Used by the G5 thermal driver */
678EXPORT_SYMBOL_GPL(machine_power_off);
679
680void machine_halt(void)
681{
682 machine_shutdown();
683 ppc_md.halt();
684#ifdef CONFIG_SMP
685 smp_send_stop();
686#endif
687 printk(KERN_EMERG "System Halted, OK to turn off power\n");
688 local_irq_disable();
689 while (1) ;
690}
691
692static int ppc64_panic_event(struct notifier_block *this,
693 unsigned long event, void *ptr)
694{
695 ppc_md.panic((char *)ptr); /* May not return */
696 return NOTIFY_DONE;
697}
698
699
700#ifdef CONFIG_SMP
701DEFINE_PER_CPU(unsigned int, pvr);
702#endif
703
704static int show_cpuinfo(struct seq_file *m, void *v)
705{
706 unsigned long cpu_id = (unsigned long)v - 1;
707 unsigned int pvr;
708 unsigned short maj;
709 unsigned short min;
710
711 if (cpu_id == NR_CPUS) {
712 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
713
714 if (ppc_md.get_cpuinfo != NULL)
715 ppc_md.get_cpuinfo(m);
716
717 return 0;
718 }
719
720	/* We only show online cpus: disable preempt (overzealous, I
721	 * know) to prevent the cpu from going down. */
722 preempt_disable();
723 if (!cpu_online(cpu_id)) {
724 preempt_enable();
725 return 0;
726 }
727
728#ifdef CONFIG_SMP
729 pvr = per_cpu(pvr, cpu_id);
730#else
731 pvr = mfspr(SPRN_PVR);
732#endif
733 maj = (pvr >> 8) & 0xFF;
734 min = pvr & 0xFF;
735
736 seq_printf(m, "processor\t: %lu\n", cpu_id);
737 seq_printf(m, "cpu\t\t: ");
738
739 if (cur_cpu_spec->pvr_mask)
740 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
741 else
742 seq_printf(m, "unknown (%08x)", pvr);
743
744#ifdef CONFIG_ALTIVEC
745 if (cpu_has_feature(CPU_FTR_ALTIVEC))
746 seq_printf(m, ", altivec supported");
747#endif /* CONFIG_ALTIVEC */
748
749 seq_printf(m, "\n");
750
751 /*
752 * Assume here that all clock rates are the same in a
753 * smp system. -- Cort
754 */
755 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
756 ppc_proc_freq % 1000000);
757
758 seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
759
760 preempt_enable();
761 return 0;
762}
763
764static void *c_start(struct seq_file *m, loff_t *pos)
765{
766 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
767}
768static void *c_next(struct seq_file *m, void *v, loff_t *pos)
769{
770 ++*pos;
771 return c_start(m, pos);
772}
773static void c_stop(struct seq_file *m, void *v)
774{
775}
776struct seq_operations cpuinfo_op = {
777 .start =c_start,
778 .next = c_next,
779 .stop = c_stop,
780 .show = show_cpuinfo,
781};
782
783/*
784 * These three variables are used to save values passed to us by prom_init()
785 * via the device tree. The TCE variables are needed because with a memory_limit
786 * in force we may need to explicitly map the TCE area at the top of RAM.
787 */
788unsigned long memory_limit;
789unsigned long tce_alloc_start;
790unsigned long tce_alloc_end;
791
792#ifdef CONFIG_PPC_ISERIES
793/*
794 * On iSeries we just parse the mem=X option from the command line.
795 * On pSeries it's a bit more complicated, see prom_init_mem()
796 */
797static int __init early_parsemem(char *p)
798{
799 if (!p)
800 return 0;
801
802 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
803
804 return 0;
805}
806early_param("mem", early_parsemem);
807#endif /* CONFIG_PPC_ISERIES */
808
809#ifdef CONFIG_PPC_MULTIPLATFORM
810static int __init set_preferred_console(void)
811{
812 struct device_node *prom_stdout = NULL;
813 char *name;
814 u32 *spd;
815 int offset = 0;
816
817 DBG(" -> set_preferred_console()\n");
818
819 /* The user has requested a console so this is already set up. */
820 if (strstr(saved_command_line, "console=")) {
821 DBG(" console was specified !\n");
822 return -EBUSY;
823 }
824
825 if (!of_chosen) {
826 DBG(" of_chosen is NULL !\n");
827 return -ENODEV;
828 }
829 /* We are getting a weird phandle from OF ... */
830 /* ... So use the full path instead */
831 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
832 if (name == NULL) {
833 DBG(" no linux,stdout-path !\n");
834 return -ENODEV;
835 }
836 prom_stdout = of_find_node_by_path(name);
837 if (!prom_stdout) {
838 DBG(" can't find stdout package %s !\n", name);
839 return -ENODEV;
840 }
841 DBG("stdout is %s\n", prom_stdout->full_name);
842
843 name = (char *)get_property(prom_stdout, "name", NULL);
844 if (!name) {
845 DBG(" stdout package has no name !\n");
846 goto not_found;
847 }
848 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
849
850 if (0)
851 ;
852#ifdef CONFIG_SERIAL_8250_CONSOLE
853 else if (strcmp(name, "serial") == 0) {
854 int i;
855 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
856 if (i > 8) {
857 switch (reg[1]) {
858 case 0x3f8:
859 offset = 0;
860 break;
861 case 0x2f8:
862 offset = 1;
863 break;
864 case 0x898:
865 offset = 2;
866 break;
867 case 0x890:
868 offset = 3;
869 break;
870 default:
871				/* We don't recognise the serial port */
872 goto not_found;
873 }
874 }
875 }
876#endif /* CONFIG_SERIAL_8250_CONSOLE */
877#ifdef CONFIG_PPC_PSERIES
878 else if (strcmp(name, "vty") == 0) {
879 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
880 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
881
882 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
883 /* Host Virtual Serial Interface */
884 int offset;
885 switch (reg[0]) {
886 case 0x30000000:
887 offset = 0;
888 break;
889 case 0x30000001:
890 offset = 1;
891 break;
892 default:
893 goto not_found;
894 }
895 of_node_put(prom_stdout);
896 DBG("Found hvsi console at offset %d\n", offset);
897 return add_preferred_console("hvsi", offset, NULL);
898 } else {
899 /* pSeries LPAR virtual console */
900 of_node_put(prom_stdout);
901 DBG("Found hvc console\n");
902 return add_preferred_console("hvc", 0, NULL);
903 }
904 }
905#endif /* CONFIG_PPC_PSERIES */
906#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
907 else if (strcmp(name, "ch-a") == 0)
908 offset = 0;
909 else if (strcmp(name, "ch-b") == 0)
910 offset = 1;
911#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
912 else
913 goto not_found;
914 of_node_put(prom_stdout);
915
916 DBG("Found serial console at ttyS%d\n", offset);
917
918 if (spd) {
919 static char __initdata opt[16];
920 sprintf(opt, "%d", *spd);
921 return add_preferred_console("ttyS", offset, opt);
922 } else
923 return add_preferred_console("ttyS", offset, NULL);
924
925 not_found:
926 DBG("No preferred console found !\n");
927 of_node_put(prom_stdout);
928 return -ENODEV;
929}
930console_initcall(set_preferred_console);
931#endif /* CONFIG_PPC_MULTIPLATFORM */
932
933#ifdef CONFIG_IRQSTACKS
934static void __init irqstack_early_init(void)
935{
936 unsigned int i;
937
938 /*
939 * interrupt stacks must be under 256MB, we cannot afford to take
940 * SLB misses on them.
941 */
942 for_each_cpu(i) {
943 softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
944 THREAD_SIZE, 0x10000000));
945 hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
946 THREAD_SIZE, 0x10000000));
947 }
948}
949#else
950#define irqstack_early_init()
951#endif
952
953/*
954 * Stack space used when we detect a bad kernel stack pointer, and
955 * early in SMP boots before relocation is enabled.
956 */
957static void __init emergency_stack_init(void)
958{
959 unsigned long limit;
960 unsigned int i;
961
962 /*
963 * Emergency stacks must be under 256MB, we cannot afford to take
964 * SLB misses on them. The ABI also requires them to be 128-byte
965 * aligned.
966 *
967 * Since we use these as temporary stacks during secondary CPU
968 * bringup, we need to get at them in real mode. This means they
969 * must also be within the RMO region.
970 */
971 limit = min(0x10000000UL, lmb.rmo_size);
972
973 for_each_cpu(i)
974 paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
975 limit)) + PAGE_SIZE;
976}
977
978/*
979 * Called from setup_arch to initialize the bitmap of available
980 * syscalls in the systemcfg page
981 */
982void __init setup_syscall_map(void)
983{
984 unsigned int i, count64 = 0, count32 = 0;
985 extern unsigned long *sys_call_table;
986 extern unsigned long *sys_call_table32;
987 extern unsigned long sys_ni_syscall;
988
989
990 for (i = 0; i < __NR_syscalls; i++) {
991 if (sys_call_table[i] == sys_ni_syscall)
992 continue;
993 count64++;
994 systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f);
995 }
996 for (i = 0; i < __NR_syscalls; i++) {
997 if (sys_call_table32[i] == sys_ni_syscall)
998 continue;
999 count32++;
1000 systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f);
1001 }
1002 printk(KERN_INFO "Syscall map setup, %d 32 bits and %d 64 bits syscalls\n",
1003 count32, count64);
1004}
1005
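The map arithmetic above packs 32 syscalls per word, most-significant bit first: syscall i lands in word i >> 5 under mask 0x80000000 >> (i & 0x1f). For example, syscall 37 sets bit 26 of word 1. A self-contained check of the same arithmetic (hypothetical helper name):

/* Sketch: test whether syscall i is marked in a syscall map. */
static int example_syscall_mapped(unsigned int *map, unsigned int i)
{
	return (map[i >> 5] & (0x80000000UL >> (i & 0x1f))) != 0;
}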
1006/*
1007 * Called from start_kernel, after lock_kernel has been called.
1008 * Initializes bootmem, which is used to manage page allocation until
1009 * mem_init is called.
1010 */
1011void __init setup_arch(char **cmdline_p)
1012{
1013 extern void do_init_bootmem(void);
1014
1015 ppc64_boot_msg(0x12, "Setup Arch");
1016
1017 *cmdline_p = cmd_line;
1018
1019 /*
1020 * Set cache line size based on type of cpu as a default.
1021 * Systems with OF can look in the properties on the cpu node(s)
1022 * for a possibly more accurate value.
1023 */
1024 dcache_bsize = ppc64_caches.dline_size;
1025 icache_bsize = ppc64_caches.iline_size;
1026
1027 /* reboot on panic */
1028 panic_timeout = 180;
1029
1030 if (ppc_md.panic)
1031 notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
1032
1033 init_mm.start_code = PAGE_OFFSET;
1034 init_mm.end_code = (unsigned long) _etext;
1035 init_mm.end_data = (unsigned long) _edata;
1036 init_mm.brk = klimit;
1037
1038 irqstack_early_init();
1039 emergency_stack_init();
1040
1041 stabs_alloc();
1042
1043 /* set up the bootmem stuff with available memory */
1044 do_init_bootmem();
1045 sparse_init();
1046
1047 /* initialize the syscall map in systemcfg */
1048 setup_syscall_map();
1049
1050 ppc_md.setup_arch();
1051
1052 /* Use the default idle loop if the platform hasn't provided one. */
1053 if (NULL == ppc_md.idle_loop) {
1054 ppc_md.idle_loop = default_idle;
1055 printk(KERN_INFO "Using default idle loop\n");
1056 }
1057
1058 paging_init();
1059 ppc64_boot_msg(0x15, "Setup Done");
1060}
1061
1062
1063/* ToDo: do something useful if ppc_md is not yet setup. */
1064#define PPC64_LINUX_FUNCTION 0x0f000000
1065#define PPC64_IPL_MESSAGE 0xc0000000
1066#define PPC64_TERM_MESSAGE 0xb0000000
1067
1068static void ppc64_do_msg(unsigned int src, const char *msg)
1069{
1070 if (ppc_md.progress) {
1071 char buf[128];
1072
1073 sprintf(buf, "%08X\n", src);
1074 ppc_md.progress(buf, 0);
1075 snprintf(buf, 128, "%s", msg);
1076 ppc_md.progress(buf, 0);
1077 }
1078}
1079
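The src word handed to ppc_md.progress() composes three fields. For the setup_arch() call above (src 0x12), for instance:

/* Worked example of the composition done by ppc64_boot_msg(0x12, ...):
 *   PPC64_LINUX_FUNCTION | PPC64_IPL_MESSAGE | 0x12
 * = 0x0f000000 | 0xc0000000 | 0x12
 * = 0xcf000012, shown on the progress display as "CF000012".
 */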
1080/* Print a boot progress message. */
1081void ppc64_boot_msg(unsigned int src, const char *msg)
1082{
1083 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
1084 printk("[boot]%04x %s\n", src, msg);
1085}
1086
1087/* Print a termination message (print only -- does not stop the kernel) */
1088void ppc64_terminate_msg(unsigned int src, const char *msg)
1089{
1090 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
1091 printk("[terminate]%04x %s\n", src, msg);
1092}
1093
1094/* This should only be called on processor 0 during calibrate decr */
1095void __init setup_default_decr(void)
1096{
1097 struct paca_struct *lpaca = get_paca();
1098
1099 lpaca->default_decr = tb_ticks_per_jiffy;
1100 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
1101}
1102
1103#ifndef CONFIG_PPC_ISERIES
1104/*
1105 * This function can be used by platforms to "find" legacy serial ports.
1106 * It works for "serial" nodes under an "isa" node, and will try to
1107 * respect the "ibm,aix-loc" property if any. It works with up to 8
1108 * ports.
1109 */
1110
1111#define MAX_LEGACY_SERIAL_PORTS 8
1112static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
1113static unsigned int old_serial_count;
1114
1115void __init generic_find_legacy_serial_ports(u64 *physport,
1116 unsigned int *default_speed)
1117{
1118 struct device_node *np;
1119 u32 *sizeprop;
1120
1121 struct isa_reg_property {
1122 u32 space;
1123 u32 address;
1124 u32 size;
1125 };
1126 struct pci_reg_property {
1127 struct pci_address addr;
1128 u32 size_hi;
1129 u32 size_lo;
1130 };
1131
1132 DBG(" -> generic_find_legacy_serial_port()\n");
1133
1134 *physport = 0;
1135 if (default_speed)
1136 *default_speed = 0;
1137
1138 np = of_find_node_by_path("/");
1139 if (!np)
1140 return;
1141
1142 /* First fill our array */
1143 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
1144 struct device_node *isa, *pci;
1145 struct isa_reg_property *reg;
1146 unsigned long phys_size, addr_size, io_base;
1147 u32 *rangesp;
1148 u32 *interrupts, *clk, *spd;
1149 char *typep;
1150 int index, rlen, rentsize;
1151
1152 /* Ok, first check if it's under an "isa" parent */
1153 isa = of_get_parent(np);
1154 if (!isa || strcmp(isa->name, "isa")) {
1155 DBG("%s: no isa parent found\n", np->full_name);
1156 continue;
1157 }
1158
1159 /* Now look for an "ibm,aix-loc" property that gives us ordering
1160 * if any...
1161 */
1162 typep = (char *)get_property(np, "ibm,aix-loc", NULL);
1163
1164 /* Get the ISA port number */
1165 reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
1166 if (reg == NULL)
1167 goto next_port;
1168 /* We assume the interrupt number isn't translated ... */
1169 interrupts = (u32 *)get_property(np, "interrupts", NULL);
1170 /* get clock freq. if present */
1171 clk = (u32 *)get_property(np, "clock-frequency", NULL);
1172 /* get default speed if present */
1173 spd = (u32 *)get_property(np, "current-speed", NULL);
1174 /* Default to locate at end of array */
1175 index = old_serial_count; /* end of the array by default */
1176
1177 /* If we have a location index, then use it */
1178 if (typep && *typep == 'S') {
1179 index = simple_strtol(typep+1, NULL, 0) - 1;
1180 /* if index is out of range, use end of array instead */
1181 if (index >= MAX_LEGACY_SERIAL_PORTS)
1182 index = old_serial_count;
1183			/* if our index is still out of range, that means the
1184			 * array is full; we could scan for a free slot, but it
1185			 * makes little sense to bother, so just skip the port
1186 */
1187 if (index >= MAX_LEGACY_SERIAL_PORTS)
1188 goto next_port;
1189 if (index >= old_serial_count)
1190 old_serial_count = index + 1;
1191			/* Check if there is a port that already claimed our slot */
1192 if (serial_ports[index].iobase != 0) {
1193 /* if we still have some room, move it, else override */
1194 if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
1195 DBG("Moved legacy port %d -> %d\n", index,
1196 old_serial_count);
1197 serial_ports[old_serial_count++] =
1198 serial_ports[index];
1199 } else {
1200 DBG("Replacing legacy port %d\n", index);
1201 }
1202 }
1203 }
1204 if (index >= MAX_LEGACY_SERIAL_PORTS)
1205 goto next_port;
1206 if (index >= old_serial_count)
1207 old_serial_count = index + 1;
1208
1209 /* Now fill the entry */
1210 memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
1211 serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
1212 serial_ports[index].iobase = reg->address;
1213 serial_ports[index].irq = interrupts ? interrupts[0] : 0;
1214 serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
1215
1216 DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
1217 index,
1218 serial_ports[index].iobase,
1219 serial_ports[index].irq,
1220 serial_ports[index].uartclk);
1221
1222 /* Get phys address of IO reg for port 1 */
1223 if (index != 0)
1224 goto next_port;
1225
1226 pci = of_get_parent(isa);
1227 if (!pci) {
1228 DBG("%s: no pci parent found\n", np->full_name);
1229 goto next_port;
1230 }
1231
1232 rangesp = (u32 *)get_property(pci, "ranges", &rlen);
1233 if (rangesp == NULL) {
1234 of_node_put(pci);
1235 goto next_port;
1236 }
1237 rlen /= 4;
1238
1239 /* we need the #size-cells of the PCI bridge node itself */
1240 phys_size = 1;
1241 sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
1242 if (sizeprop != NULL)
1243 phys_size = *sizeprop;
1244		/* we need the parent's #address-cells */
1245 addr_size = prom_n_addr_cells(pci);
1246 rentsize = 3 + addr_size + phys_size;
1247 io_base = 0;
1248 for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
1249 if (((rangesp[0] >> 24) & 0x3) != 1)
1250 continue; /* not IO space */
1251 io_base = rangesp[3];
1252 if (addr_size == 2)
1253 io_base = (io_base << 32) | rangesp[4];
1254 }
1255 if (io_base != 0) {
1256 *physport = io_base + reg->address;
1257 if (default_speed && spd)
1258 *default_speed = *spd;
1259 }
1260 of_node_put(pci);
1261 next_port:
1262 of_node_put(isa);
1263 }
1264
1265 DBG(" <- generic_find_legacy_serial_port()\n");
1266}
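
The loop above decodes the bridge's Open Firmware "ranges" property: each entry is three child-address cells, then the parent's #address-cells, then the child's #size-cells, and the space code in bits 24-25 of the first cell selects I/O space (1). Below is a minimal userspace sketch of the same decode; the cell values and the 2/1 cell counts are made-up examples, not taken from any real device tree.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One illustrative "ranges" entry: I/O space at child address 0,
	 * mapped to parent address 0x00000000f8000000, 1MB long. */
	uint32_t ranges[] = {
		0x01000000, 0x00000000, 0x00000000,	/* child addr (3 cells) */
		0x00000000, 0xf8000000,			/* parent addr (2 cells) */
		0x00100000,				/* size (1 cell) */
	};
	int addr_size = 2, phys_size = 1;		/* assumed cell counts */
	int rentsize = 3 + addr_size + phys_size;
	int rlen = sizeof(ranges) / sizeof(ranges[0]);
	uint64_t io_base = 0;
	uint32_t *rp = ranges;

	for (; rlen >= rentsize; rlen -= rentsize, rp += rentsize) {
		if (((rp[0] >> 24) & 0x3) != 1)
			continue;			/* not I/O space */
		io_base = rp[3];
		if (addr_size == 2)
			io_base = (io_base << 32) | rp[4];
	}
	printf("io_base = 0x%llx\n", (unsigned long long)io_base);
	return 0;
}
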
1267
1268static struct platform_device serial_device = {
1269 .name = "serial8250",
1270 .id = PLAT8250_DEV_PLATFORM,
1271 .dev = {
1272 .platform_data = serial_ports,
1273 },
1274};
1275
1276static int __init serial_dev_init(void)
1277{
1278 return platform_device_register(&serial_device);
1279}
1280arch_initcall(serial_dev_init);
1281
1282#endif /* CONFIG_PPC_ISERIES */
1283
1284int check_legacy_ioport(unsigned long base_port)
1285{
1286 if (ppc_md.check_legacy_ioport == NULL)
1287 return 0;
1288 return ppc_md.check_legacy_ioport(base_port);
1289}
1290EXPORT_SYMBOL(check_legacy_ioport);
1291
1292#ifdef CONFIG_XMON
1293static int __init early_xmon(char *p)
1294{
1295 /* ensure xmon is enabled */
1296 if (p) {
1297 if (strncmp(p, "on", 2) == 0)
1298 xmon_init(1);
1299 if (strncmp(p, "off", 3) == 0)
1300 xmon_init(0);
1301 if (strncmp(p, "early", 5) != 0)
1302 return 0;
1303 }
1304 xmon_init(1);
1305 debugger(NULL);
1306
1307 return 0;
1308}
1309early_param("xmon", early_xmon);
1310#endif
1311
1312void cpu_die(void)
1313{
1314 if (ppc_md.cpu_die)
1315 ppc_md.cpu_die();
1316}
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index 347112cca3c0..ec9d0984b6a0 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -133,7 +133,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurious FP exceptions */
-	current->thread.fpscr = 0;
+	current->thread.fpscr.val = 0;
 
 #ifdef CONFIG_ALTIVEC
 	err |= __put_user(v_regs, &sc->v_regs);
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
deleted file mode 100644
index a8b7a5a56bb4..000000000000
--- a/arch/ppc64/kernel/signal32.c
+++ /dev/null
@@ -1,998 +0,0 @@
1/*
2 * signal32.c: Support 32bit signal syscalls.
3 *
4 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * These routines maintain argument size conversion between 32bit and 64bit
9 * environment.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/sched.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/smp_lock.h>
22#include <linux/kernel.h>
23#include <linux/signal.h>
24#include <linux/syscalls.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/compat.h>
28#include <linux/ptrace.h>
29#include <asm/ppc32.h>
30#include <asm/uaccess.h>
31#include <asm/ppcdebug.h>
32#include <asm/unistd.h>
33#include <asm/cacheflush.h>
34#include <asm/vdso.h>
35
36#define DEBUG_SIG 0
37
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39
40#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
41
42/*
43 * When we have signals to deliver, we set up on the
44 * user stack, going down from the original stack pointer:
45 * a sigregs32 struct
46 * a sigcontext32 struct
47 * a gap of __SIGNAL_FRAMESIZE32 bytes
48 *
49 * Each of these things must be a multiple of 16 bytes in size.
50 *
51 */
52struct sigregs32 {
53 struct mcontext32 mctx; /* all the register values */
54 /*
55 * Programs using the rs6000/xcoff abi can save up to 19 gp
56 * regs and 18 fp regs below sp before decrementing it.
57 */
58 int abigap[56];
59};
60
61/* We use the mc_pad field for the signal return trampoline. */
62#define tramp mc_pad
63
64/*
65 * When we have rt signals to deliver, we set up on the
66 * user stack, going down from the original stack pointer:
67 * one rt_sigframe32 struct (siginfo + ucontext + ABI gap)
68 * a gap of __SIGNAL_FRAMESIZE32+16 bytes
69 * (the +16 is to get the siginfo and ucontext32 in the same
70 * positions as in older kernels).
71 *
72 * Each of these things must be a multiple of 16 bytes in size.
73 *
74 */
75struct rt_sigframe32 {
76 compat_siginfo_t info;
77 struct ucontext32 uc;
78 /*
79 * Programs using the rs6000/xcoff abi can save up to 19 gp
80 * regs and 18 fp regs below sp before decrementing it.
81 */
82 int abigap[56];
83};
84
85
86/*
87 * Common utility functions used by signal and context support
88 *
89 */
90
91/*
92 * Restore the user process's signal mask
93 * (implemented in signal.c)
94 */
95extern void restore_sigmask(sigset_t *set);
96
97/*
98 * Functions for flipping sigsets (thanks to the brain-dead generic
99 * implementation that makes things simple for little-endian only)
100 */
101static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
102{
103 switch (_NSIG_WORDS) {
104	case 4: compat->sig[6] = set->sig[3] & 0xffffffffull ;
105 compat->sig[7] = set->sig[3] >> 32;
106 case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
107 compat->sig[5] = set->sig[2] >> 32;
108 case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
109 compat->sig[3] = set->sig[1] >> 32;
110 case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
111 compat->sig[1] = set->sig[0] >> 32;
112 }
113}
114
115static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
116{
117 switch (_NSIG_WORDS) {
118 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
119 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
120 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
121 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
122 }
123}
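
compat_from_sigset()/sigset_from_compat() above simply split each 64-bit sigset word into a low and a high 32-bit compat word, and reassemble them on the way back. A standalone round-trip sketch of the _NSIG_WORDS == 1 case (64 signals):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	uint64_t set = 0x123456789abcdef0ull;
	uint32_t compat[2];
	uint64_t back;

	compat[0] = set & 0xffffffffull;	/* low 32 bits first */
	compat[1] = set >> 32;			/* then the high 32 bits */
	back = compat[0] | ((uint64_t)compat[1] << 32);
	assert(back == set);
	printf("round trip ok: %016llx\n", (unsigned long long)back);
	return 0;
}
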
124
125
126/*
127 * Save the current user registers on the user stack.
128 * We only save the altivec registers if the process has used
129 * altivec instructions at some point.
130 */
131static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame, int sigret)
132{
133 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
134 int i, err = 0;
135
136 /* Make sure floating point registers are stored in regs */
137 flush_fp_to_thread(current);
138
139 /* save general and floating-point registers */
140 for (i = 0; i <= PT_RESULT; i ++)
141 err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]);
142 err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr,
143 ELF_NFPREG * sizeof(double));
144 if (err)
145 return 1;
146
147 current->thread.fpscr = 0; /* turn off all fp exceptions */
148
149#ifdef CONFIG_ALTIVEC
150 /* save altivec registers */
151 if (current->thread.used_vr) {
152 flush_altivec_to_thread(current);
153 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
154 ELF_NVRREG32 * sizeof(vector128)))
155 return 1;
156 /* set MSR_VEC in the saved MSR value to indicate that
157 frame->mc_vregs contains valid data */
158 if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
159 return 1;
160 }
161 /* else assert((regs->msr & MSR_VEC) == 0) */
162
163 /* We always copy to/from vrsave, it's 0 if we don't have or don't
164 * use altivec. Since VSCR only contains 32 bits saved in the least
165 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
166 * most significant bits of that same vector. --BenH
167 */
168 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
169 return 1;
170#endif /* CONFIG_ALTIVEC */
171
172 if (sigret) {
173 /* Set up the sigreturn trampoline: li r0,sigret; sc */
174 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
175 || __put_user(0x44000002UL, &frame->tramp[1]))
176 return 1;
177 flush_icache_range((unsigned long) &frame->tramp[0],
178 (unsigned long) &frame->tramp[2]);
179 }
180
181 return 0;
182}
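
The trampoline written by save_user_regs() is two PowerPC instructions: 0x38000000 is addi r0,r0,imm (the li r0,imm form, since the source register field is 0), so adding the syscall number places it in the low 16 bits, and 0x44000002 is sc. A small sketch that assembles the pair, assuming the historical ppc value 119 for __NR_sigreturn:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int nr_sigreturn = 119;	/* assumed __NR_sigreturn */
	uint32_t tramp[2];

	tramp[0] = 0x38000000u + nr_sigreturn;	/* li r0,119 */
	tramp[1] = 0x44000002u;			/* sc */
	printf("li r0,%u -> %08x\nsc        -> %08x\n",
	       nr_sigreturn, tramp[0], tramp[1]);
	return 0;
}
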
183
184/*
185 * Restore the current user register values from the user stack,
186 * (except for MSR).
187 */
188static long restore_user_regs(struct pt_regs *regs,
189 struct mcontext32 __user *sr, int sig)
190{
191 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
192 int i;
193 long err = 0;
194 unsigned int save_r2 = 0;
195#ifdef CONFIG_ALTIVEC
196 unsigned long msr;
197#endif
198
199	/*
200	 * restore the general registers, except MSR and SOFTE, and
201	 * take care to keep r2 (TLS) intact if this is not a signal
202	 */
203 if (!sig)
204 save_r2 = (unsigned int)regs->gpr[2];
205 for (i = 0; i <= PT_RESULT; i++) {
206 if ((i == PT_MSR) || (i == PT_SOFTE))
207 continue;
208 err |= __get_user(gregs[i], &sr->mc_gregs[i]);
209 }
210 if (!sig)
211 regs->gpr[2] = (unsigned long) save_r2;
212 if (err)
213 return 1;
214
215 /* force the process to reload the FP registers from
216 current->thread when it next does FP instructions */
217 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
218 if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
219 sizeof(sr->mc_fregs)))
220 return 1;
221
222#ifdef CONFIG_ALTIVEC
223 /* force the process to reload the altivec registers from
224 current->thread when it next does altivec instructions */
225 regs->msr &= ~MSR_VEC;
226 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
227 /* restore altivec registers from the stack */
228 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
229 sizeof(sr->mc_vregs)))
230 return 1;
231 } else if (current->thread.used_vr)
232 memset(current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128));
233
234 /* Always get VRSAVE back */
235 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
236 return 1;
237#endif /* CONFIG_ALTIVEC */
238
239#ifndef CONFIG_SMP
240 preempt_disable();
241 if (last_task_used_math == current)
242 last_task_used_math = NULL;
243 if (last_task_used_altivec == current)
244 last_task_used_altivec = NULL;
245 preempt_enable();
246#endif
247 return 0;
248}
249
250
251/*
252 * Start of non-RT signal support
253 *
254 * sigset_t is 32 bits for non-rt signals
255 *
256 * System Calls
257 * sigaction sys32_sigaction
258 * sigreturn sys32_sigreturn
259 *
260 * Note: sigsuspend has no special 32-bit routine; it uses the 64-bit one
261 *
262 * Other routines
263 * setup_frame32
264 */
265
266/*
267 * Atomically swap in the new signal mask, and wait for a signal.
268 */
269long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
270 struct pt_regs *regs)
271{
272 sigset_t saveset;
273
274 mask &= _BLOCKABLE;
275 spin_lock_irq(&current->sighand->siglock);
276 saveset = current->blocked;
277 siginitset(&current->blocked, mask);
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280
281 regs->result = -EINTR;
282 regs->gpr[3] = EINTR;
283 regs->ccr |= 0x10000000;
284 while (1) {
285 current->state = TASK_INTERRUPTIBLE;
286 schedule();
287 if (do_signal32(&saveset, regs))
288 /*
289 * Returning 0 means we return to userspace via
290 * ret_from_except and thus restore all user
291 * registers from *regs. This is what we need
292 * to do when a signal has been delivered.
293 */
294 return 0;
295 }
296}
297
298long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
299 struct old_sigaction32 __user *oact)
300{
301 struct k_sigaction new_ka, old_ka;
302 int ret;
303
304 if (sig < 0)
305 sig = -sig;
306
307 if (act) {
308 compat_old_sigset_t mask;
309 compat_uptr_t handler, restorer;
310
311 if (get_user(handler, &act->sa_handler) ||
312 __get_user(restorer, &act->sa_restorer) ||
313 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
314 __get_user(mask, &act->sa_mask))
315 return -EFAULT;
316 new_ka.sa.sa_handler = compat_ptr(handler);
317 new_ka.sa.sa_restorer = compat_ptr(restorer);
318 siginitset(&new_ka.sa.sa_mask, mask);
319 }
320
321 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
322 if (!ret && oact) {
323 if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
324 __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
325 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
326 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
327 return -EFAULT;
328 }
329
330 return ret;
331}
332
333
334
335/*
336 * Start of RT signal support
337 *
338 * sigset_t is 64 bits for rt signals
339 *
340 * System Calls
341 * sigaction sys32_rt_sigaction
342 * sigpending sys32_rt_sigpending
343 * sigprocmask sys32_rt_sigprocmask
344 * sigreturn sys32_rt_sigreturn
345 * sigqueueinfo sys32_rt_sigqueueinfo
346 * sigsuspend sys32_rt_sigsuspend
347 *
348 * Other routines
349 * setup_rt_frame32
350 * copy_siginfo_to_user32
351 * siginfo32to64
352 */
353
354
355long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
356 struct sigaction32 __user *oact, size_t sigsetsize)
357{
358 struct k_sigaction new_ka, old_ka;
359 int ret;
360 compat_sigset_t set32;
361
362 /* XXX: Don't preclude handling different sized sigset_t's. */
363 if (sigsetsize != sizeof(compat_sigset_t))
364 return -EINVAL;
365
366 if (act) {
367 compat_uptr_t handler;
368
369 ret = get_user(handler, &act->sa_handler);
370 new_ka.sa.sa_handler = compat_ptr(handler);
371 ret |= __copy_from_user(&set32, &act->sa_mask,
372 sizeof(compat_sigset_t));
373 sigset_from_compat(&new_ka.sa.sa_mask, &set32);
374 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
375 if (ret)
376 return -EFAULT;
377 }
378
379 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
380 if (!ret && oact) {
381 compat_from_sigset(&set32, &old_ka.sa.sa_mask);
382 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
383 ret |= __copy_to_user(&oact->sa_mask, &set32,
384 sizeof(compat_sigset_t));
385 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
386 }
387 return ret;
388}
389
390/*
391 * Note: it is necessary to treat how as an unsigned int, with the
392 * corresponding cast to a signed int to ensure that the proper
393 * conversion (sign extension) between the register representation
394 * of a signed int (msr in 32-bit mode) and the register representation
395 * of a signed int (msr in 64-bit mode) is performed.
396 */
397long sys32_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
398 compat_sigset_t __user *oset, size_t sigsetsize)
399{
400 sigset_t s;
401 sigset_t __user *up;
402 compat_sigset_t s32;
403 int ret;
404 mm_segment_t old_fs = get_fs();
405
406 if (set) {
407 if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
408 return -EFAULT;
409 sigset_from_compat(&s, &s32);
410 }
411
412 set_fs(KERNEL_DS);
413 /* This is valid because of the set_fs() */
414 up = (sigset_t __user *) &s;
415 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
416 sigsetsize);
417 set_fs(old_fs);
418 if (ret)
419 return ret;
420 if (oset) {
421 compat_from_sigset(&s32, &s);
422 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
423 return -EFAULT;
424 }
425 return 0;
426}
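
The set_fs(KERNEL_DS) idiom above lets a compat wrapper hand a kernel stack buffer to a syscall that expects a user pointer: raising the thread's address limit makes the access_ok()/copy_*_user checks accept the kernel address, and the limit is restored afterwards. A toy userspace model of the pattern, with every name invented for the sketch; only the shape matches the kernel code:

#include <stdio.h>
#include <string.h>

static unsigned long addr_limit = 0x8000;	/* plays the role of USER_DS */

static int copy_in(char *dst, const char *src, unsigned long src_addr,
		   size_t n)
{
	if (src_addr + n > addr_limit)
		return -1;			/* "access_ok()" failed */
	memcpy(dst, src, n);
	return 0;
}

int main(void)
{
	char kbuf[8] = "sigset";
	char out[8];
	unsigned long fake_kernel_addr = 0xF000;

	/* Rejected under the user limit... */
	printf("user ds:   %d\n", copy_in(out, kbuf, fake_kernel_addr, 7));
	/* ...accepted once the limit is raised ("set_fs(KERNEL_DS)"). */
	addr_limit = ~0ul;
	printf("kernel ds: %d\n", copy_in(out, kbuf, fake_kernel_addr, 7));
	addr_limit = 0x8000;			/* "set_fs(old_fs)" */
	return 0;
}
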
427
428long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
429{
430 sigset_t s;
431 compat_sigset_t s32;
432 int ret;
433 mm_segment_t old_fs = get_fs();
434
435 set_fs(KERNEL_DS);
436 /* The __user pointer cast is valid because of the set_fs() */
437 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
438 set_fs(old_fs);
439 if (!ret) {
440 compat_from_sigset(&s32, &s);
441 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
442 return -EFAULT;
443 }
444 return ret;
445}
446
447
448int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
449{
450 int err;
451
452 if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
453 return -EFAULT;
454
455 /* If you change siginfo_t structure, please be sure
456 * this code is fixed accordingly.
457 * It should never copy any pad contained in the structure
458 * to avoid security leaks, but must copy the generic
459 * 3 ints plus the relevant union member.
460 * This routine must convert siginfo from 64bit to 32bit as well
461 * at the same time.
462 */
463 err = __put_user(s->si_signo, &d->si_signo);
464 err |= __put_user(s->si_errno, &d->si_errno);
465 err |= __put_user((short)s->si_code, &d->si_code);
466 if (s->si_code < 0)
467 err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
468 SI_PAD_SIZE32);
469 else switch(s->si_code >> 16) {
470 case __SI_CHLD >> 16:
471 err |= __put_user(s->si_pid, &d->si_pid);
472 err |= __put_user(s->si_uid, &d->si_uid);
473 err |= __put_user(s->si_utime, &d->si_utime);
474 err |= __put_user(s->si_stime, &d->si_stime);
475 err |= __put_user(s->si_status, &d->si_status);
476 break;
477 case __SI_FAULT >> 16:
478 err |= __put_user((unsigned int)(unsigned long)s->si_addr,
479 &d->si_addr);
480 break;
481 case __SI_POLL >> 16:
482 err |= __put_user(s->si_band, &d->si_band);
483 err |= __put_user(s->si_fd, &d->si_fd);
484 break;
485 case __SI_TIMER >> 16:
486 err |= __put_user(s->si_tid, &d->si_tid);
487 err |= __put_user(s->si_overrun, &d->si_overrun);
488 err |= __put_user(s->si_int, &d->si_int);
489 break;
490 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
491 case __SI_MESGQ >> 16:
492 err |= __put_user(s->si_int, &d->si_int);
493 /* fallthrough */
494 case __SI_KILL >> 16:
495 default:
496 err |= __put_user(s->si_pid, &d->si_pid);
497 err |= __put_user(s->si_uid, &d->si_uid);
498 break;
499 }
500 return err;
501}
502
503/*
504 * Note: it is necessary to treat pid and sig as unsigned ints, with the
505 * corresponding cast to a signed int to ensure that the proper conversion
506 * (sign extension) between the register representation of a signed int
507 * (msr in 32-bit mode) and the register representation of a signed int
508 * (msr in 64-bit mode) is performed.
509 */
510long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
511{
512 siginfo_t info;
513 int ret;
514 mm_segment_t old_fs = get_fs();
515
516 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
517 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
518 return -EFAULT;
519 set_fs (KERNEL_DS);
520	/* The __user pointer cast is valid because of the set_fs() */
521 ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
522 set_fs (old_fs);
523 return ret;
524}
525
526int sys32_rt_sigsuspend(compat_sigset_t __user * unewset, size_t sigsetsize, int p3,
527 int p4, int p6, int p7, struct pt_regs *regs)
528{
529 sigset_t saveset, newset;
530 compat_sigset_t s32;
531
532 /* XXX: Don't preclude handling different sized sigset_t's. */
533 if (sigsetsize != sizeof(sigset_t))
534 return -EINVAL;
535
536 if (copy_from_user(&s32, unewset, sizeof(s32)))
537 return -EFAULT;
538
539 /*
540 * Swap the 2 words of the 64-bit sigset_t (they are stored
541 * in the "wrong" endian in 32-bit user storage).
542 */
543 sigset_from_compat(&newset, &s32);
544
545 sigdelsetmask(&newset, ~_BLOCKABLE);
546 spin_lock_irq(&current->sighand->siglock);
547 saveset = current->blocked;
548 current->blocked = newset;
549 recalc_sigpending();
550 spin_unlock_irq(&current->sighand->siglock);
551
552 regs->result = -EINTR;
553 regs->gpr[3] = EINTR;
554 regs->ccr |= 0x10000000;
555 while (1) {
556 current->state = TASK_INTERRUPTIBLE;
557 schedule();
558 if (do_signal32(&saveset, regs))
559 /*
560 * Returning 0 means we return to userspace via
561 * ret_from_except and thus restore all user
562 * registers from *regs. This is what we need
563 * to do when a signal has been delivered.
564 */
565 return 0;
566 }
567}
568
569/*
570 * Start Alternate signal stack support
571 *
572 * System Calls
573 * sigaltstack sys32_sigaltstack
574 */
575
576int sys32_sigaltstack(u32 __new, u32 __old, int r5,
577 int r6, int r7, int r8, struct pt_regs *regs)
578{
579 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
580 stack_32_t __user * oldstack = (stack_32_t __user *)(long) __old;
581 stack_t uss, uoss;
582 int ret;
583 mm_segment_t old_fs;
584 unsigned long sp;
585 compat_uptr_t ss_sp;
586
587 /*
588 * set sp to the user stack on entry to the system call
589 * the system call router sets R9 to the saved registers
590 */
591 sp = regs->gpr[1];
592
593 /* Put new stack info in local 64 bit stack struct */
594 if (newstack) {
595 if (get_user(ss_sp, &newstack->ss_sp) ||
596 __get_user(uss.ss_flags, &newstack->ss_flags) ||
597 __get_user(uss.ss_size, &newstack->ss_size))
598 return -EFAULT;
599 uss.ss_sp = compat_ptr(ss_sp);
600 }
601
602 old_fs = get_fs();
603 set_fs(KERNEL_DS);
604 /* The __user pointer casts are valid because of the set_fs() */
605 ret = do_sigaltstack(
606 newstack ? (stack_t __user *) &uss : NULL,
607 oldstack ? (stack_t __user *) &uoss : NULL,
608 sp);
609 set_fs(old_fs);
610 /* Copy the stack information to the user output buffer */
611 if (!ret && oldstack &&
612 (put_user((long)uoss.ss_sp, &oldstack->ss_sp) ||
613 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
614 __put_user(uoss.ss_size, &oldstack->ss_size)))
615 return -EFAULT;
616 return ret;
617}
618
619
620/*
621 * Set up a signal frame for a "real-time" signal handler
622 * (one which gets siginfo).
623 */
624static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
625 siginfo_t *info, sigset_t *oldset,
626 struct pt_regs * regs, unsigned long newsp)
627{
628 struct rt_sigframe32 __user *rt_sf;
629 struct mcontext32 __user *frame;
630 unsigned long origsp = newsp;
631 compat_sigset_t c_oldset;
632
633 /* Set up Signal Frame */
634 /* Put a Real Time Context onto stack */
635 newsp -= sizeof(*rt_sf);
636 rt_sf = (struct rt_sigframe32 __user *)newsp;
637
638 /* create a stack frame for the caller of the handler */
639 newsp -= __SIGNAL_FRAMESIZE32 + 16;
640
641 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
642 goto badframe;
643
644 compat_from_sigset(&c_oldset, oldset);
645
646 /* Put the siginfo & fill in most of the ucontext */
647 if (copy_siginfo_to_user32(&rt_sf->info, info)
648 || __put_user(0, &rt_sf->uc.uc_flags)
649 || __put_user(0, &rt_sf->uc.uc_link)
650 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
651 || __put_user(sas_ss_flags(regs->gpr[1]),
652 &rt_sf->uc.uc_stack.ss_flags)
653 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
654 || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
655 || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset)))
656 goto badframe;
657
658 /* Save user registers on the stack */
659 frame = &rt_sf->uc.uc_mcontext;
660 if (put_user(regs->gpr[1], (u32 __user *)newsp))
661 goto badframe;
662
663 if (vdso32_rt_sigtramp && current->thread.vdso_base) {
664 if (save_user_regs(regs, frame, 0))
665 goto badframe;
666 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
667 } else {
668 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
669 goto badframe;
670 regs->link = (unsigned long) frame->tramp;
671 }
672 regs->gpr[1] = (unsigned long) newsp;
673 regs->gpr[3] = sig;
674 regs->gpr[4] = (unsigned long) &rt_sf->info;
675 regs->gpr[5] = (unsigned long) &rt_sf->uc;
676 regs->gpr[6] = (unsigned long) rt_sf;
677 regs->nip = (unsigned long) ka->sa.sa_handler;
678 regs->trap = 0;
679 regs->result = 0;
680
681 if (test_thread_flag(TIF_SINGLESTEP))
682 ptrace_notify(SIGTRAP);
683
684 return 1;
685
686badframe:
687#if DEBUG_SIG
688 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
689 regs, frame, newsp);
690#endif
691 force_sigsegv(sig, current);
692 return 0;
693}
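
handle_rt_signal32() carves the user stack from the top down: first the rt_sigframe32, then a __SIGNAL_FRAMESIZE32+16 gap so the handler's caller frame and the siginfo/ucontext positions match older kernels. A layout sketch with stand-in sizes; the real sizeof(struct rt_sigframe32) and frame size differ:

#include <stdio.h>

#define SIGNAL_FRAMESIZE32	64	/* illustrative stand-in */

int main(void)
{
	unsigned long sp = 0xbfff0000ul;	/* old user stack pointer */
	unsigned long sizeof_rt_sf = 1056;	/* stand-in struct size */
	unsigned long rt_sf, newsp;

	newsp = sp - sizeof_rt_sf;		/* rt_sigframe32 goes here */
	rt_sf = newsp;
	newsp -= SIGNAL_FRAMESIZE32 + 16;	/* caller frame + 16-byte gap */
	printf("old sp  %#lx\nrt_sf   %#lx\nnew sp  %#lx\n",
	       sp, rt_sf, newsp);
	return 0;
}
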
694
695static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig)
696{
697 compat_sigset_t c_set;
698 sigset_t set;
699 u32 mcp;
700
701 if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set))
702 || __get_user(mcp, &ucp->uc_regs))
703 return -EFAULT;
704 sigset_from_compat(&set, &c_set);
705 restore_sigmask(&set);
706 if (restore_user_regs(regs, (struct mcontext32 __user *)(u64)mcp, sig))
707 return -EFAULT;
708
709 return 0;
710}
711
712/*
713 * Handle {get,set,swap}_context operations for 32 bits processes
714 */
715
716long sys32_swapcontext(struct ucontext32 __user *old_ctx,
717 struct ucontext32 __user *new_ctx,
718 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
719{
720 unsigned char tmp;
721 compat_sigset_t c_set;
722
723 /* Context size is for future use. Right now, we only make sure
724 * we are passed something we understand
725 */
726 if (ctx_size < sizeof(struct ucontext32))
727 return -EINVAL;
728
729 if (old_ctx != NULL) {
730 compat_from_sigset(&c_set, &current->blocked);
731 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
732 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
733 || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set))
734 || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs))
735 return -EFAULT;
736 }
737 if (new_ctx == NULL)
738 return 0;
739 if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
740 || __get_user(tmp, (u8 __user *) new_ctx)
741 || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
742 return -EFAULT;
743
744 /*
745 * If we get a fault copying the context into the kernel's
746 * image of the user's registers, we can't just return -EFAULT
747 * because the user's registers will be corrupted. For instance
748 * the NIP value may have been updated but not some of the
749 * other registers. Given that we have done the access_ok
750 * and successfully read the first and last bytes of the region
751 * above, this should only happen in an out-of-memory situation
752 * or if another thread unmaps the region containing the context.
753 * We kill the task with a SIGSEGV in this situation.
754 */
755 if (do_setcontext32(new_ctx, regs, 0))
756 do_exit(SIGSEGV);
757
758 return 0;
759}
760
761long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
762 struct pt_regs *regs)
763{
764 struct rt_sigframe32 __user *rt_sf;
765 int ret;
766
767
768 /* Always make any pending restarted system calls return -EINTR */
769 current_thread_info()->restart_block.fn = do_no_restart_syscall;
770
771 rt_sf = (struct rt_sigframe32 __user *)
772 (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16);
773 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
774 goto bad;
775 if (do_setcontext32(&rt_sf->uc, regs, 1))
776 goto bad;
777
778 /*
779 * It's not clear whether or why it is desirable to save the
780 * sigaltstack setting on signal delivery and restore it on
781 * signal return. But other architectures do this and we have
782 * always done it up until now so it is probably better not to
783 * change it. -- paulus
784	 * We use the sys32_ version that does the 32/64-bit conversion
785	 * and takes the userland pointer directly. What about error
786	 * checking? Nobody does any...
787 */
788 sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
789
790 ret = regs->result;
791
792 return ret;
793
794 bad:
795 force_sig(SIGSEGV, current);
796 return 0;
797}
798
799
800/*
801 * OK, we're invoking a handler
802 */
803static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
804 siginfo_t *info, sigset_t *oldset,
805 struct pt_regs * regs, unsigned long newsp)
806{
807 struct sigcontext32 __user *sc;
808 struct sigregs32 __user *frame;
809 unsigned long origsp = newsp;
810
811 /* Set up Signal Frame */
812 newsp -= sizeof(struct sigregs32);
813 frame = (struct sigregs32 __user *) newsp;
814
815 /* Put a sigcontext on the stack */
816 newsp -= sizeof(*sc);
817 sc = (struct sigcontext32 __user *) newsp;
818
819 /* create a stack frame for the caller of the handler */
820 newsp -= __SIGNAL_FRAMESIZE32;
821
822 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
823 goto badframe;
824
825#if _NSIG != 64
826#error "Please adjust handle_signal32()"
827#endif
828 if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler)
829 || __put_user(oldset->sig[0], &sc->oldmask)
830 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
831 || __put_user((u32)(u64)frame, &sc->regs)
832 || __put_user(sig, &sc->signal))
833 goto badframe;
834
835 if (vdso32_sigtramp && current->thread.vdso_base) {
836 if (save_user_regs(regs, &frame->mctx, 0))
837 goto badframe;
838 regs->link = current->thread.vdso_base + vdso32_sigtramp;
839 } else {
840 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
841 goto badframe;
842 regs->link = (unsigned long) frame->mctx.tramp;
843 }
844
845 if (put_user(regs->gpr[1], (u32 __user *)newsp))
846 goto badframe;
847 regs->gpr[1] = (unsigned long) newsp;
848 regs->gpr[3] = sig;
849 regs->gpr[4] = (unsigned long) sc;
850 regs->nip = (unsigned long) ka->sa.sa_handler;
851 regs->trap = 0;
852 regs->result = 0;
853
854 if (test_thread_flag(TIF_SINGLESTEP))
855 ptrace_notify(SIGTRAP);
856
857 return 1;
858
859badframe:
860#if DEBUG_SIG
861 printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n",
862 regs, frame, *newspp);
863#endif
864 force_sigsegv(sig, current);
865 return 0;
866}
867
868/*
869 * Do a signal return; undo the signal stack.
870 */
871long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
872 struct pt_regs *regs)
873{
874 struct sigcontext32 __user *sc;
875 struct sigcontext32 sigctx;
876 struct mcontext32 __user *sr;
877 sigset_t set;
878 int ret;
879
880 /* Always make any pending restarted system calls return -EINTR */
881 current_thread_info()->restart_block.fn = do_no_restart_syscall;
882
883 sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32);
884 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
885 goto badframe;
886
887 /*
888 * Note that PPC32 puts the upper 32 bits of the sigmask in the
889 * unused part of the signal stackframe
890 */
891 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
892 restore_sigmask(&set);
893
894 sr = (struct mcontext32 __user *)(u64)sigctx.regs;
895 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
896 || restore_user_regs(regs, sr, 1))
897 goto badframe;
898
899 ret = regs->result;
900 return ret;
901
902badframe:
903 force_sig(SIGSEGV, current);
904 return 0;
905}
906
907
908
909/*
910 * Start of do_signal32 routine
911 *
912 * This routine gets control when a pending signal needs to be processed
913 * in the 32 bit target thread -
914 *
915 * It handles both rt and non-rt signals
916 */
917
918/*
919 * Note that 'init' is a special process: it doesn't get signals it doesn't
920 * want to handle. Thus you cannot kill init, even with a SIGKILL, even
921 * by mistake.
922 */
923
924int do_signal32(sigset_t *oldset, struct pt_regs *regs)
925{
926 siginfo_t info;
927 unsigned int frame, newsp;
928 int signr, ret;
929 struct k_sigaction ka;
930
931 if (!oldset)
932 oldset = &current->blocked;
933
934 newsp = frame = 0;
935
936 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
937
938 if (TRAP(regs) == 0x0C00 /* System Call! */
939 && regs->ccr & 0x10000000 /* error signalled */
940 && ((ret = regs->gpr[3]) == ERESTARTSYS
941 || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
942 || ret == ERESTART_RESTARTBLOCK)) {
943
944 if (signr > 0
945 && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
946 || (ret == ERESTARTSYS
947 && !(ka.sa.sa_flags & SA_RESTART)))) {
948 /* make the system call return an EINTR error */
949 regs->result = -EINTR;
950 regs->gpr[3] = EINTR;
951 /* note that the cr0.SO bit is already set */
952 } else {
953 regs->nip -= 4; /* Back up & retry system call */
954 regs->result = 0;
955 regs->trap = 0;
956 if (ret == ERESTART_RESTARTBLOCK)
957 regs->gpr[0] = __NR_restart_syscall;
958 else
959 regs->gpr[3] = regs->orig_gpr3;
960 }
961 }
962
963 if (signr == 0)
964 return 0; /* no signals delivered */
965
966 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
967 && (!on_sig_stack(regs->gpr[1])))
968 newsp = (current->sas_ss_sp + current->sas_ss_size);
969 else
970 newsp = regs->gpr[1];
971 newsp &= ~0xfUL;
972
973 /*
974 * Reenable the DABR before delivering the signal to
975 * user space. The DABR will have been cleared if it
976 * triggered inside the kernel.
977 */
978 if (current->thread.dabr)
979 set_dabr(current->thread.dabr);
980
981 /* Whee! Actually deliver the signal. */
982 if (ka.sa.sa_flags & SA_SIGINFO)
983 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp);
984 else
985 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp);
986
987 if (ret) {
988 spin_lock_irq(&current->sighand->siglock);
989 sigorsets(&current->blocked, &current->blocked,
990 &ka.sa.sa_mask);
991 if (!(ka.sa.sa_flags & SA_NODEFER))
992 sigaddset(&current->blocked, signr);
993 recalc_sigpending();
994 spin_unlock_irq(&current->sighand->siglock);
995 }
996
997 return ret;
998}
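
The restart block in do_signal32() reduces to a small decision table: with no handler the syscall is always restarted; ERESTARTNOHAND and ERESTART_RESTARTBLOCK become -EINTR once a handler runs; ERESTARTSYS becomes -EINTR only when the handler lacks SA_RESTART; ERESTARTNOINTR always restarts. A sketch of that table, using the kernel's internal error values:

#include <stdio.h>

enum { ERESTARTSYS = 512, ERESTARTNOINTR, ERESTARTNOHAND,
       ERESTART_RESTARTBLOCK = 516 };

static const char *disposition(int ret, int have_handler, int sa_restart)
{
	if (!have_handler)
		return "restart";	/* no handler: always retry */
	if (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK)
		return "-EINTR";
	if (ret == ERESTARTSYS && !sa_restart)
		return "-EINTR";
	return "restart";		/* ERESTARTNOINTR, or SA_RESTART set */
}

int main(void)
{
	printf("ERESTARTSYS + SA_RESTART:  %s\n",
	       disposition(ERESTARTSYS, 1, 1));
	printf("ERESTARTSYS, no restart:   %s\n",
	       disposition(ERESTARTSYS, 1, 0));
	printf("ERESTARTNOHAND + handler:  %s\n",
	       disposition(ERESTARTNOHAND, 1, 1));
	return 0;
}
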
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 793b562da653..017c12919832 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -45,8 +45,7 @@
 #include <asm/cputable.h>
 #include <asm/system.h>
 #include <asm/abs_addr.h>
-
-#include "mpic.h"
+#include <asm/mpic.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -70,28 +69,6 @@ void smp_call_function_interrupt(void);
 int smt_enabled_at_boot = 1;
 
 #ifdef CONFIG_MPIC
-void smp_mpic_message_pass(int target, int msg)
-{
-	/* make sure we're sending something that translates to an IPI */
-	if ( msg > 0x3 ){
-		printk("SMP %d: smp_message_pass: unknown msg %d\n",
-		       smp_processor_id(), msg);
-		return;
-	}
-	switch ( target )
-	{
-	case MSG_ALL:
-		mpic_send_ipi(msg, 0xffffffff);
-		break;
-	case MSG_ALL_BUT_SELF:
-		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
-		break;
-	default:
-		mpic_send_ipi(msg, 1 << target);
-		break;
-	}
-}
-
 int __init smp_mpic_probe(void)
 {
 	int nr_cpus;
@@ -128,21 +105,6 @@ void __devinit smp_generic_kick_cpu(int nr)
 
 #endif /* CONFIG_MPIC */
 
-static void __init smp_space_timers(unsigned int max_cpus)
-{
-	int i;
-	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
-	unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
-
-	for_each_cpu(i) {
-		if (i != boot_cpuid) {
-			paca[i].next_jiffy_update_tb =
-				previous_tb + offset;
-			previous_tb = paca[i].next_jiffy_update_tb;
-		}
-	}
-}
-
 void smp_message_recv(int msg, struct pt_regs *regs)
 {
 	switch(msg) {
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
deleted file mode 100644
index e93c13458910..000000000000
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ /dev/null
@@ -1,1222 +0,0 @@
1/*
2 * sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
3 *
4 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 *
8 * These routines maintain argument size conversion between 32bit and 64bit
9 * environment.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/fs.h>
21#include <linux/mm.h>
22#include <linux/file.h>
23#include <linux/signal.h>
24#include <linux/resource.h>
25#include <linux/times.h>
26#include <linux/utsname.h>
27#include <linux/timex.h>
28#include <linux/smp.h>
29#include <linux/smp_lock.h>
30#include <linux/sem.h>
31#include <linux/msg.h>
32#include <linux/shm.h>
33#include <linux/poll.h>
34#include <linux/personality.h>
35#include <linux/stat.h>
36#include <linux/mman.h>
37#include <linux/in.h>
38#include <linux/syscalls.h>
39#include <linux/unistd.h>
40#include <linux/sysctl.h>
41#include <linux/binfmts.h>
42#include <linux/security.h>
43#include <linux/compat.h>
44#include <linux/ptrace.h>
45#include <linux/elf.h>
46
47#include <asm/ptrace.h>
48#include <asm/types.h>
49#include <asm/ipc.h>
50#include <asm/uaccess.h>
51#include <asm/unistd.h>
52#include <asm/semaphore.h>
53#include <asm/time.h>
54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h>
56
57#include "pci.h"
58
59/* readdir & getdents */
60#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
61#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
62
63struct old_linux_dirent32 {
64 u32 d_ino;
65 u32 d_offset;
66 unsigned short d_namlen;
67 char d_name[1];
68};
69
70struct readdir_callback32 {
71 struct old_linux_dirent32 __user * dirent;
72 int count;
73};
74
75static int fillonedir(void * __buf, const char * name, int namlen,
76 off_t offset, ino_t ino, unsigned int d_type)
77{
78 struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
79 struct old_linux_dirent32 __user * dirent;
80
81 if (buf->count)
82 return -EINVAL;
83 buf->count++;
84 dirent = buf->dirent;
85 put_user(ino, &dirent->d_ino);
86 put_user(offset, &dirent->d_offset);
87 put_user(namlen, &dirent->d_namlen);
88 copy_to_user(dirent->d_name, name, namlen);
89 put_user(0, dirent->d_name + namlen);
90 return 0;
91}
92
93asmlinkage int old32_readdir(unsigned int fd, struct old_linux_dirent32 __user *dirent, unsigned int count)
94{
95 int error = -EBADF;
96 struct file * file;
97 struct readdir_callback32 buf;
98
99 file = fget(fd);
100 if (!file)
101 goto out;
102
103 buf.count = 0;
104 buf.dirent = dirent;
105
106 error = vfs_readdir(file, (filldir_t)fillonedir, &buf);
107 if (error < 0)
108 goto out_putf;
109 error = buf.count;
110
111out_putf:
112 fput(file);
113out:
114 return error;
115}
116
117struct linux_dirent32 {
118 u32 d_ino;
119 u32 d_off;
120 unsigned short d_reclen;
121 char d_name[1];
122};
123
124struct getdents_callback32 {
125 struct linux_dirent32 __user * current_dir;
126 struct linux_dirent32 __user * previous;
127 int count;
128 int error;
129};
130
131static int filldir(void * __buf, const char * name, int namlen, off_t offset,
132 ino_t ino, unsigned int d_type)
133{
134 struct linux_dirent32 __user * dirent;
135 struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
136 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
137
138 buf->error = -EINVAL; /* only used if we fail.. */
139 if (reclen > buf->count)
140 return -EINVAL;
141 dirent = buf->previous;
142 if (dirent) {
143 if (__put_user(offset, &dirent->d_off))
144 goto efault;
145 }
146 dirent = buf->current_dir;
147 if (__put_user(ino, &dirent->d_ino))
148 goto efault;
149 if (__put_user(reclen, &dirent->d_reclen))
150 goto efault;
151 if (copy_to_user(dirent->d_name, name, namlen))
152 goto efault;
153 if (__put_user(0, dirent->d_name + namlen))
154 goto efault;
155 if (__put_user(d_type, (char __user *) dirent + reclen - 1))
156 goto efault;
157 buf->previous = dirent;
158 dirent = (void __user *)dirent + reclen;
159 buf->current_dir = dirent;
160 buf->count -= reclen;
161 return 0;
162efault:
163 buf->error = -EFAULT;
164 return -EFAULT;
165}
166
167asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent,
168 unsigned int count)
169{
170 struct file * file;
171 struct linux_dirent32 __user * lastdirent;
172 struct getdents_callback32 buf;
173 int error;
174
175 error = -EFAULT;
176 if (!access_ok(VERIFY_WRITE, dirent, count))
177 goto out;
178
179 error = -EBADF;
180 file = fget(fd);
181 if (!file)
182 goto out;
183
184 buf.current_dir = dirent;
185 buf.previous = NULL;
186 buf.count = count;
187 buf.error = 0;
188
189 error = vfs_readdir(file, (filldir_t)filldir, &buf);
190 if (error < 0)
191 goto out_putf;
192 error = buf.error;
193 lastdirent = buf.previous;
194 if (lastdirent) {
195 if (put_user(file->f_pos, &lastdirent->d_off))
196 error = -EFAULT;
197 else
198 error = count - buf.count;
199 }
200
201out_putf:
202 fput(file);
203out:
204 return error;
205}
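
filldir() sizes each 32-bit record as the fixed dirent header plus the name, a terminating NUL, and a trailing d_type byte, rounded up to 4 bytes via the ROUND_UP/NAME_OFFSET macros near the top of this file. A worked example of the sizing:

#include <stdio.h>
#include <stddef.h>

struct linux_dirent32 {
	unsigned int d_ino;
	unsigned int d_off;
	unsigned short d_reclen;
	char d_name[1];
};

#define ROUND_UP(x) (((x) + sizeof(unsigned int) - 1) & ~(sizeof(unsigned int) - 1))

int main(void)
{
	size_t namlen = 6;			/* e.g. "README" */
	size_t name_offset = offsetof(struct linux_dirent32, d_name);
	size_t reclen = ROUND_UP(name_offset + namlen + 2);

	/* 10-byte header + 6-byte name + NUL + d_type = 18, rounded to 20 */
	printf("header %zu + name %zu + NUL + d_type -> reclen %zu\n",
	       name_offset, namlen, reclen);
	return 0;
}
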
206
207asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
208 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
209 compat_uptr_t tvp_x)
210{
211 /* sign extend n */
212 return compat_sys_select((int)n, inp, outp, exp, compat_ptr(tvp_x));
213}
214
215int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
216{
217 long err;
218
219 if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
220 !new_valid_dev(stat->rdev))
221 return -EOVERFLOW;
222
223 err = access_ok(VERIFY_WRITE, statbuf, sizeof(*statbuf)) ? 0 : -EFAULT;
224 err |= __put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
225 err |= __put_user(stat->ino, &statbuf->st_ino);
226 err |= __put_user(stat->mode, &statbuf->st_mode);
227 err |= __put_user(stat->nlink, &statbuf->st_nlink);
228 err |= __put_user(stat->uid, &statbuf->st_uid);
229 err |= __put_user(stat->gid, &statbuf->st_gid);
230 err |= __put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
231 err |= __put_user(stat->size, &statbuf->st_size);
232 err |= __put_user(stat->atime.tv_sec, &statbuf->st_atime);
233 err |= __put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
234 err |= __put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
235 err |= __put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
236 err |= __put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
237 err |= __put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
238 err |= __put_user(stat->blksize, &statbuf->st_blksize);
239 err |= __put_user(stat->blocks, &statbuf->st_blocks);
240 err |= __put_user(0, &statbuf->__unused4[0]);
241 err |= __put_user(0, &statbuf->__unused4[1]);
242
243 return err;
244}
245
246/* Note: it is necessary to treat option as an unsigned int,
247 * with the corresponding cast to a signed int to ensure that the
248 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
249 * and the register representation of a signed int (msr in 64-bit mode) is performed.
250 */
251asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2)
252{
253 return sys_sysfs((int)option, arg1, arg2);
254}
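
The (int) cast these wrappers keep repeating matters because a 32-bit caller's -1 arrives as 0xffffffff in the low half of a 64-bit register; casting through a signed int sign-extends it back to the 64-bit -1 the native syscall expects. A userspace demonstration, assuming an LP64 target where long is 64 bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t option = 0xffffffffu;		/* a 32-bit process's -1 */
	long wrong = (long)(unsigned long)option;	/* zero-extended */
	long right = (long)(int)option;			/* sign-extended */

	printf("without cast: %ld\n", wrong);	/* 4294967295 */
	printf("with cast:    %ld\n", right);	/* -1 */
	return 0;
}
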
255
256/* Handle adjtimex compatibility. */
257struct timex32 {
258 u32 modes;
259 s32 offset, freq, maxerror, esterror;
260 s32 status, constant, precision, tolerance;
261 struct compat_timeval time;
262 s32 tick;
263 s32 ppsfreq, jitter, shift, stabil;
264 s32 jitcnt, calcnt, errcnt, stbcnt;
265 s32 :32; s32 :32; s32 :32; s32 :32;
266 s32 :32; s32 :32; s32 :32; s32 :32;
267 s32 :32; s32 :32; s32 :32; s32 :32;
268};
269
270extern int do_adjtimex(struct timex *);
271extern void ppc_adjtimex(void);
272
273asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
274{
275 struct timex txc;
276 int ret;
277
278 memset(&txc, 0, sizeof(struct timex));
279
280 if(get_user(txc.modes, &utp->modes) ||
281 __get_user(txc.offset, &utp->offset) ||
282 __get_user(txc.freq, &utp->freq) ||
283 __get_user(txc.maxerror, &utp->maxerror) ||
284 __get_user(txc.esterror, &utp->esterror) ||
285 __get_user(txc.status, &utp->status) ||
286 __get_user(txc.constant, &utp->constant) ||
287 __get_user(txc.precision, &utp->precision) ||
288 __get_user(txc.tolerance, &utp->tolerance) ||
289 __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
290 __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
291 __get_user(txc.tick, &utp->tick) ||
292 __get_user(txc.ppsfreq, &utp->ppsfreq) ||
293 __get_user(txc.jitter, &utp->jitter) ||
294 __get_user(txc.shift, &utp->shift) ||
295 __get_user(txc.stabil, &utp->stabil) ||
296 __get_user(txc.jitcnt, &utp->jitcnt) ||
297 __get_user(txc.calcnt, &utp->calcnt) ||
298 __get_user(txc.errcnt, &utp->errcnt) ||
299 __get_user(txc.stbcnt, &utp->stbcnt))
300 return -EFAULT;
301
302 ret = do_adjtimex(&txc);
303
304 /* adjust the conversion of TB to time of day to track adjtimex */
305 ppc_adjtimex();
306
307 if(put_user(txc.modes, &utp->modes) ||
308 __put_user(txc.offset, &utp->offset) ||
309 __put_user(txc.freq, &utp->freq) ||
310 __put_user(txc.maxerror, &utp->maxerror) ||
311 __put_user(txc.esterror, &utp->esterror) ||
312 __put_user(txc.status, &utp->status) ||
313 __put_user(txc.constant, &utp->constant) ||
314 __put_user(txc.precision, &utp->precision) ||
315 __put_user(txc.tolerance, &utp->tolerance) ||
316 __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
317 __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
318 __put_user(txc.tick, &utp->tick) ||
319 __put_user(txc.ppsfreq, &utp->ppsfreq) ||
320 __put_user(txc.jitter, &utp->jitter) ||
321 __put_user(txc.shift, &utp->shift) ||
322 __put_user(txc.stabil, &utp->stabil) ||
323 __put_user(txc.jitcnt, &utp->jitcnt) ||
324 __put_user(txc.calcnt, &utp->calcnt) ||
325 __put_user(txc.errcnt, &utp->errcnt) ||
326 __put_user(txc.stbcnt, &utp->stbcnt))
327 ret = -EFAULT;
328
329 return ret;
330}
331
332asmlinkage long sys32_pause(void)
333{
334 current->state = TASK_INTERRUPTIBLE;
335 schedule();
336
337 return -ERESTARTNOHAND;
338}
339
340static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
341{
342 long usec;
343
344 if (!access_ok(VERIFY_READ, i, sizeof(*i)))
345 return -EFAULT;
346 if (__get_user(o->tv_sec, &i->tv_sec))
347 return -EFAULT;
348 if (__get_user(usec, &i->tv_usec))
349 return -EFAULT;
350 o->tv_nsec = usec * 1000;
351 return 0;
352}
353
354static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
355{
356 return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
357 (__put_user(i->tv_sec, &o->tv_sec) |
358 __put_user(i->tv_usec, &o->tv_usec)));
359}
360
361struct sysinfo32 {
362 s32 uptime;
363 u32 loads[3];
364 u32 totalram;
365 u32 freeram;
366 u32 sharedram;
367 u32 bufferram;
368 u32 totalswap;
369 u32 freeswap;
370 unsigned short procs;
371 unsigned short pad;
372 u32 totalhigh;
373 u32 freehigh;
374 u32 mem_unit;
375 char _f[20-2*sizeof(int)-sizeof(int)];
376};
377
378asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
379{
380 struct sysinfo s;
381 int ret, err;
382 int bitcount=0;
383 mm_segment_t old_fs = get_fs ();
384
385 /* The __user cast is valid due to set_fs() */
386 set_fs (KERNEL_DS);
387 ret = sys_sysinfo((struct sysinfo __user *)&s);
388 set_fs (old_fs);
389
390 /* Check to see if any memory value is too large for 32-bit and
391 * scale down if needed.
392 */
393 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
394 while (s.mem_unit < PAGE_SIZE) {
395 s.mem_unit <<= 1;
396 bitcount++;
397 }
398 s.totalram >>=bitcount;
399 s.freeram >>= bitcount;
400 s.sharedram >>= bitcount;
401 s.bufferram >>= bitcount;
402 s.totalswap >>= bitcount;
403 s.freeswap >>= bitcount;
404 s.totalhigh >>= bitcount;
405 s.freehigh >>= bitcount;
406 }
407
408 err = put_user (s.uptime, &info->uptime);
409 err |= __put_user (s.loads[0], &info->loads[0]);
410 err |= __put_user (s.loads[1], &info->loads[1]);
411 err |= __put_user (s.loads[2], &info->loads[2]);
412 err |= __put_user (s.totalram, &info->totalram);
413 err |= __put_user (s.freeram, &info->freeram);
414 err |= __put_user (s.sharedram, &info->sharedram);
415 err |= __put_user (s.bufferram, &info->bufferram);
416 err |= __put_user (s.totalswap, &info->totalswap);
417 err |= __put_user (s.freeswap, &info->freeswap);
418 err |= __put_user (s.procs, &info->procs);
419 err |= __put_user (s.totalhigh, &info->totalhigh);
420 err |= __put_user (s.freehigh, &info->freehigh);
421 err |= __put_user (s.mem_unit, &info->mem_unit);
422 if (err)
423 return -EFAULT;
424
425 return ret;
426}
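
The scaling loop in sys32_sysinfo() keeps value * mem_unit constant: mem_unit doubles until it reaches the page size while every byte count shifts right by the same amount, so an 8GB total still fits a u32. A worked example, with 4096 standing in for PAGE_SIZE:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t totalram = 0x200000000ull;	/* 8 GB in 1-byte units */
	uint32_t mem_unit = 1;
	int bitcount = 0;

	if (totalram >> 32) {			/* too big for a u32 */
		while (mem_unit < 4096) {
			mem_unit <<= 1;
			bitcount++;
		}
		totalram >>= bitcount;
	}
	/* 2097152 units of 4096 bytes == 8 GB */
	printf("totalram %llu units of %u bytes\n",
	       (unsigned long long)totalram, mem_unit);
	return 0;
}
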
427
428
429
430
431/* Translations due to time_t size differences, which affect all
432   sorts of things, like timeval and itimerval. */
433extern struct timezone sys_tz;
434
435asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
436{
437 if (tv) {
438 struct timeval ktv;
439 do_gettimeofday(&ktv);
440 if (put_tv32(tv, &ktv))
441 return -EFAULT;
442 }
443 if (tz) {
444 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
445 return -EFAULT;
446 }
447
448 return 0;
449}
450
451
452
453asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
454{
455 struct timespec kts;
456 struct timezone ktz;
457
458 if (tv) {
459 if (get_ts32(&kts, tv))
460 return -EFAULT;
461 }
462 if (tz) {
463 if (copy_from_user(&ktz, tz, sizeof(ktz)))
464 return -EFAULT;
465 }
466
467 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
468}
469
470#ifdef CONFIG_SYSVIPC
471long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
472 u32 fifth)
473{
474 int version;
475
476 version = call >> 16; /* hack for backward compatibility */
477 call &= 0xffff;
478
479 switch (call) {
480
481 case SEMTIMEDOP:
482 if (fifth)
483 /* sign extend semid */
484 return compat_sys_semtimedop((int)first,
485 compat_ptr(ptr), second,
486 compat_ptr(fifth));
487 /* else fall through for normal semop() */
488 case SEMOP:
489 /* struct sembuf is the same on 32 and 64bit :)) */
490 /* sign extend semid */
491 return sys_semtimedop((int)first, compat_ptr(ptr), second,
492 NULL);
493 case SEMGET:
494 /* sign extend key, nsems */
495 return sys_semget((int)first, (int)second, third);
496 case SEMCTL:
497 /* sign extend semid, semnum */
498 return compat_sys_semctl((int)first, (int)second, third,
499 compat_ptr(ptr));
500
501 case MSGSND:
502 /* sign extend msqid */
503 return compat_sys_msgsnd((int)first, (int)second, third,
504 compat_ptr(ptr));
505 case MSGRCV:
506 /* sign extend msqid, msgtyp */
507 return compat_sys_msgrcv((int)first, second, (int)fifth,
508 third, version, compat_ptr(ptr));
509 case MSGGET:
510 /* sign extend key */
511 return sys_msgget((int)first, second);
512 case MSGCTL:
513 /* sign extend msqid */
514 return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
515
516 case SHMAT:
517 /* sign extend shmid */
518 return compat_sys_shmat((int)first, second, third, version,
519 compat_ptr(ptr));
520 case SHMDT:
521 return sys_shmdt(compat_ptr(ptr));
522 case SHMGET:
523 /* sign extend key_t */
524 return sys_shmget((int)first, second, third);
525 case SHMCTL:
526 /* sign extend shmid */
527 return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
528
529 default:
530 return -ENOSYS;
531 }
532
533 return -ENOSYS;
534}
535#endif
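
sys32_ipc() demultiplexes the classic SysV ipc() entry point: userspace packs an ABI version into the top 16 bits of the call word and the operation into the low 16 bits. A tiny sketch of the unpacking, using SEMOP == 1 as in linux/ipc.h:

#include <stdio.h>

int main(void)
{
	unsigned int call = (1u << 16) | 1;	/* version 1, op SEMOP */
	unsigned int version = call >> 16;	/* backward-compat hack */
	unsigned int op = call & 0xffff;

	printf("version %u, op %u\n", version, op);
	return 0;
}
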
536
537/* Note: it is necessary to treat out_fd and in_fd as unsigned ints,
538 * with the corresponding cast to a signed int to ensure that the
539 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
540 * and the register representation of a signed int (msr in 64-bit mode) is performed.
541 */
542asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
543{
544 mm_segment_t old_fs = get_fs();
545 int ret;
546 off_t of;
547 off_t __user *up;
548
549 if (offset && get_user(of, offset))
550 return -EFAULT;
551
552 /* The __user pointer cast is valid because of the set_fs() */
553 set_fs(KERNEL_DS);
554 up = offset ? (off_t __user *) &of : NULL;
555 ret = sys_sendfile((int)out_fd, (int)in_fd, up, count);
556 set_fs(old_fs);
557
558 if (offset && put_user(of, offset))
559 return -EFAULT;
560
561 return ret;
562}
563
564asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
565{
566 mm_segment_t old_fs = get_fs();
567 int ret;
568 loff_t lof;
569 loff_t __user *up;
570
571 if (offset && get_user(lof, offset))
572 return -EFAULT;
573
574 /* The __user pointer cast is valid because of the set_fs() */
575 set_fs(KERNEL_DS);
576 up = offset ? (loff_t __user *) &lof : NULL;
577 ret = sys_sendfile64(out_fd, in_fd, up, count);
578 set_fs(old_fs);
579
580 if (offset && put_user(lof, offset))
581 return -EFAULT;
582
583 return ret;
584}
585
586long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
587 unsigned long a3, unsigned long a4, unsigned long a5,
588 struct pt_regs *regs)
589{
590 int error;
591 char * filename;
592
593 filename = getname((char __user *) a0);
594 error = PTR_ERR(filename);
595 if (IS_ERR(filename))
596 goto out;
597 flush_fp_to_thread(current);
598 flush_altivec_to_thread(current);
599
600 error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
601
602 if (error == 0) {
603 task_lock(current);
604 current->ptrace &= ~PT_DTRACE;
605 task_unlock(current);
606 }
607 putname(filename);
608
609out:
610 return error;
611}
612
613/* Set up a thread for executing a new program. */
614void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
615{
616 set_fs(USER_DS);
617
618 /*
619 * If we exec out of a kernel thread then thread.regs will not be
620 * set. Do it now.
621 */
622 if (!current->thread.regs) {
623 unsigned long childregs = (unsigned long)current->thread_info +
624 THREAD_SIZE;
625 childregs -= sizeof(struct pt_regs);
626 current->thread.regs = (struct pt_regs *)childregs;
627 }
628
629 /*
630 * ELF_PLAT_INIT already clears all registers but it also sets r2.
631 * So just clear r2 here.
632 */
633 regs->gpr[2] = 0;
634
635 regs->nip = nip;
636 regs->gpr[1] = sp;
637 regs->msr = MSR_USER32;
638#ifndef CONFIG_SMP
639 if (last_task_used_math == current)
640 last_task_used_math = 0;
641#endif /* CONFIG_SMP */
642 current->thread.fpscr = 0;
643 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
644#ifdef CONFIG_ALTIVEC
645#ifndef CONFIG_SMP
646 if (last_task_used_altivec == current)
647 last_task_used_altivec = 0;
648#endif /* CONFIG_SMP */
649 memset(current->thread.vr, 0, sizeof(current->thread.vr));
650 current->thread.vscr.u[0] = 0;
651 current->thread.vscr.u[1] = 0;
652 current->thread.vscr.u[2] = 0;
653 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
654 current->thread.vrsave = 0;
655 current->thread.used_vr = 0;
656#endif /* CONFIG_ALTIVEC */
657}
658
659/* Note: it is necessary to treat option as an unsigned int,
660 * with the corresponding cast to a signed int to ensure that the
661 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
662 * and the register representation of a signed int (msr in 64-bit mode) is performed.
663 */
664asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
665{
666 return sys_prctl((int)option,
667 (unsigned long) arg2,
668 (unsigned long) arg3,
669 (unsigned long) arg4,
670 (unsigned long) arg5);
671}
672
673/* Note: it is necessary to treat pid as an unsigned int,
674 * with the corresponding cast to a signed int to ensure that the
675 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
676 * and the register representation of a signed int (msr in 64-bit mode) is performed.
677 */
678asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
679{
680 struct timespec t;
681 int ret;
682 mm_segment_t old_fs = get_fs ();
683
684 /* The __user pointer cast is valid because of the set_fs() */
685 set_fs (KERNEL_DS);
686 ret = sys_sched_rr_get_interval((int)pid, (struct timespec __user *) &t);
687 set_fs (old_fs);
688 if (put_compat_timespec(&t, interval))
689 return -EFAULT;
690 return ret;
691}
692
693asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
694{
695 return sys_pciconfig_read((unsigned long) bus,
696 (unsigned long) dfn,
697 (unsigned long) off,
698 (unsigned long) len,
699 compat_ptr(ubuf));
700}
701
702asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
703{
704 return sys_pciconfig_write((unsigned long) bus,
705 (unsigned long) dfn,
706 (unsigned long) off,
707 (unsigned long) len,
708 compat_ptr(ubuf));
709}
710
711asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
712{
713 return sys_pciconfig_iobase(which, in_bus, in_devfn);
714}
715
716
717/* Note: it is necessary to treat mode as an unsigned int,
718 * with the corresponding cast to a signed int to ensure that the
719 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
720 * and the register representation of a signed int (msr in 64-bit mode) is performed.
721 */
722asmlinkage long sys32_access(const char __user * filename, u32 mode)
723{
724 return sys_access(filename, (int)mode);
725}
726
727
728/* Note: it is necessary to treat mode as an unsigned int,
729 * with the corresponding cast to a signed int, to ensure that the
730 * proper conversion (sign extension) is performed between the 32-bit
731 * register representation of a signed int and its 64-bit register representation.
732 */
733asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
734{
735 return sys_creat(pathname, (int)mode);
736}
737
738
739/* Note: it is necessary to treat pid and options as unsigned ints,
740 * with the corresponding casts to signed ints, to ensure that the
741 * proper conversion (sign extension) is performed between the 32-bit
742 * register representation of a signed int and its 64-bit register representation.
743 */
744asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
745{
746 return sys_waitpid((int)pid, stat_addr, (int)options);
747}
748
749
750/* Note: it is necessary to treat gidsetsize as an unsigned int,
751 * with the corresponding cast to a signed int, to ensure that the
752 * proper conversion (sign extension) is performed between the 32-bit
753 * register representation of a signed int and its 64-bit register representation.
754 */
755asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
756{
757 return sys_getgroups((int)gidsetsize, grouplist);
758}
759
760
761/* Note: it is necessary to treat pid as an unsigned int,
762 * with the corresponding cast to a signed int, to ensure that the
763 * proper conversion (sign extension) is performed between the 32-bit
764 * register representation of a signed int and its 64-bit register representation.
765 */
766asmlinkage long sys32_getpgid(u32 pid)
767{
768 return sys_getpgid((int)pid);
769}
770
771
772
773/* Note: it is necessary to treat pid as an unsigned int,
774 * with the corresponding cast to a signed int, to ensure that the
775 * proper conversion (sign extension) is performed between the 32-bit
776 * register representation of a signed int and its 64-bit register representation.
777 */
778asmlinkage long sys32_getsid(u32 pid)
779{
780 return sys_getsid((int)pid);
781}
782
783
784/* Note: it is necessary to treat pid and sig as unsigned ints,
785 * with the corresponding casts to signed ints, to ensure that the
786 * proper conversion (sign extension) is performed between the 32-bit
787 * register representation of a signed int and its 64-bit register representation.
788 */
789asmlinkage long sys32_kill(u32 pid, u32 sig)
790{
791 return sys_kill((int)pid, (int)sig);
792}
793
794
795/* Note: it is necessary to treat mode as an unsigned int,
796 * with the corresponding cast to a signed int, to ensure that the
797 * proper conversion (sign extension) is performed between the 32-bit
798 * register representation of a signed int and its 64-bit register representation.
799 */
800asmlinkage long sys32_mkdir(const char __user * pathname, u32 mode)
801{
802 return sys_mkdir(pathname, (int)mode);
803}
804
805long sys32_nice(u32 increment)
806{
807 /* sign extend increment */
808 return sys_nice((int)increment);
809}
810
811off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
812{
813 /* sign extend offset */
814 return sys_lseek(fd, (int)offset, origin);
815}
816
817/* Note: it is necessary to treat bufsiz as an unsigned int,
818 * with the corresponding cast to a signed int, to ensure that the
819 * proper conversion (sign extension) is performed between the 32-bit
820 * register representation of a signed int and its 64-bit register representation.
821 */
822asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32 bufsiz)
823{
824 return sys_readlink(path, buf, (int)bufsiz);
825}
826
827/* Note: it is necessary to treat policy as an unsigned int,
828 * with the corresponding cast to a signed int, to ensure that the
829 * proper conversion (sign extension) is performed between the 32-bit
830 * register representation of a signed int and its 64-bit register representation.
831 */
832asmlinkage long sys32_sched_get_priority_max(u32 policy)
833{
834 return sys_sched_get_priority_max((int)policy);
835}
836
837
838/* Note: it is necessary to treat policy as an unsigned int,
839 * with the corresponding cast to a signed int, to ensure that the
840 * proper conversion (sign extension) is performed between the 32-bit
841 * register representation of a signed int and its 64-bit register representation.
842 */
843asmlinkage long sys32_sched_get_priority_min(u32 policy)
844{
845 return sys_sched_get_priority_min((int)policy);
846}
847
848
849/* Note: it is necessary to treat pid as an unsigned int,
850 * with the corresponding cast to a signed int, to ensure that the
851 * proper conversion (sign extension) is performed between the 32-bit
852 * register representation of a signed int and its 64-bit register representation.
853 */
854asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
855{
856 return sys_sched_getparam((int)pid, param);
857}
858
859
860/* Note: it is necessary to treat pid as an unsigned int,
861 * with the corresponding cast to a signed int, to ensure that the
862 * proper conversion (sign extension) is performed between the 32-bit
863 * register representation of a signed int and its 64-bit register representation.
864 */
865asmlinkage long sys32_sched_getscheduler(u32 pid)
866{
867 return sys_sched_getscheduler((int)pid);
868}
869
870
871/* Note: it is necessary to treat pid as an unsigned int,
872 * with the corresponding cast to a signed int, to ensure that the
873 * proper conversion (sign extension) is performed between the 32-bit
874 * register representation of a signed int and its 64-bit register representation.
875 */
876asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
877{
878 return sys_sched_setparam((int)pid, param);
879}
880
881
882/* Note: it is necessary to treat pid and policy as unsigned ints,
883 * with the corresponding casts to signed ints, to ensure that the
884 * proper conversion (sign extension) is performed between the 32-bit
885 * register representation of a signed int and its 64-bit register representation.
886 */
887asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
888{
889 return sys_sched_setscheduler((int)pid, (int)policy, param);
890}
891
892
893/* Note: it is necessary to treat len as an unsigned int,
894 * with the corresponding cast to a signed int, to ensure that the
895 * proper conversion (sign extension) is performed between the 32-bit
896 * register representation of a signed int and its 64-bit register representation.
897 */
898asmlinkage long sys32_setdomainname(char __user *name, u32 len)
899{
900 return sys_setdomainname(name, (int)len);
901}
902
903
904/* Note: it is necessary to treat gidsetsize as an unsigned int,
905 * with the corresponding cast to a signed int, to ensure that the
906 * proper conversion (sign extension) is performed between the 32-bit
907 * register representation of a signed int and its 64-bit register representation.
908 */
909asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t __user *grouplist)
910{
911 return sys_setgroups((int)gidsetsize, grouplist);
912}
913
914
915asmlinkage long sys32_sethostname(char __user *name, u32 len)
916{
917 /* sign extend len */
918 return sys_sethostname(name, (int)len);
919}
920
921
922/* Note: it is necessary to treat pid and pgid as unsigned ints,
923 * with the corresponding casts to signed ints, to ensure that the
924 * proper conversion (sign extension) is performed between the 32-bit
925 * register representation of a signed int and its 64-bit register representation.
926 */
927asmlinkage long sys32_setpgid(u32 pid, u32 pgid)
928{
929 return sys_setpgid((int)pid, (int)pgid);
930}
931
932long sys32_getpriority(u32 which, u32 who)
933{
934 /* sign extend which and who */
935 return sys_getpriority((int)which, (int)who);
936}
937
938long sys32_setpriority(u32 which, u32 who, u32 niceval)
939{
940 /* sign extend which, who and niceval */
941 return sys_setpriority((int)which, (int)who, (int)niceval);
942}
943
944long sys32_ioprio_get(u32 which, u32 who)
945{
946 /* sign extend which and who */
947 return sys_ioprio_get((int)which, (int)who);
948}
949
950long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
951{
952 /* sign extend which, who and ioprio */
953 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
954}
955
956/* Note: it is necessary to treat newmask as an unsigned int,
957 * with the corresponding cast to a signed int, to ensure that the
958 * proper conversion (sign extension) is performed between the 32-bit
959 * register representation of a signed int and its 64-bit register representation.
960 */
961asmlinkage long sys32_ssetmask(u32 newmask)
962{
963 return sys_ssetmask((int) newmask);
964}
965
966asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
967{
968 /* sign extend len */
969 return sys_syslog(type, buf, (int)len);
970}
971
972
973/* Note: it is necessary to treat mask as an unsigned int,
974 * with the corresponding cast to a signed int, to ensure that the
975 * proper conversion (sign extension) is performed between the 32-bit
976 * register representation of a signed int and its 64-bit register representation.
977 */
978asmlinkage long sys32_umask(u32 mask)
979{
980 return sys_umask((int)mask);
981}
982
983#ifdef CONFIG_SYSCTL
984struct __sysctl_args32 {
985 u32 name;
986 int nlen;
987 u32 oldval;
988 u32 oldlenp;
989 u32 newval;
990 u32 newlen;
991 u32 __unused[4];
992};
993
994asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
995{
996 struct __sysctl_args32 tmp;
997 int error;
998 size_t oldlen;
999 size_t __user *oldlenp = NULL;
1000 unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
1001
1002 if (copy_from_user(&tmp, args, sizeof(tmp)))
1003 return -EFAULT;
1004
1005 if (tmp.oldval && tmp.oldlenp) {
1006 /* Duh, this is ugly and might not work if sysctl_args
1007 is in read-only memory, but do_sysctl does indirectly
1008 a lot of uaccess in both directions and we'd have to
1009 basically copy the whole sysctl.c here, and
1010 glibc's __sysctl uses rw memory for the structure
1011 anyway. */
1012 oldlenp = (size_t __user *)addr;
1013 if (get_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
1014 put_user(oldlen, oldlenp))
1015 return -EFAULT;
1016 }
1017
1018 lock_kernel();
1019 error = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
1020 compat_ptr(tmp.oldval), oldlenp,
1021 compat_ptr(tmp.newval), tmp.newlen);
1022 unlock_kernel();
1023 if (oldlenp) {
1024 if (!error) {
1025 if (get_user(oldlen, oldlenp) ||
1026 put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
1027 error = -EFAULT;
1028 }
1029 copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
1030 }
1031 return error;
1032}
1033#endif
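/*
 * Illustrative sketch (not part of the original file): struct
 * __sysctl_args32 differs from the native struct __sysctl_args only in
 * carrying each user pointer as a u32, which is why sys32_sysctl copies
 * the struct in and widens every member through compat_ptr().
 */
static void __user *demo_widen_ptr(u32 ptr32)
{
	return compat_ptr(ptr32);	/* 32-bit token -> full __user pointer */
}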
1034
1035asmlinkage int sys32_uname(struct old_utsname __user * name)
1036{
1037 int err = 0;
1038
1039 down_read(&uts_sem);
1040 if (copy_to_user(name, &system_utsname, sizeof(*name)))
1041 err = -EFAULT;
1042 up_read(&uts_sem);
1043 if (!err && personality(current->personality) == PER_LINUX32) {
1044 /* change "ppc64" to "ppc" */
1045 if (__put_user(0, name->machine + 3)
1046 || __put_user(0, name->machine + 4))
1047 err = -EFAULT;
1048 }
1049 return err;
1050}
1051
1052asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
1053{
1054 int error;
1055
1056 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
1057 return -EFAULT;
1058
1059 down_read(&uts_sem);
1060 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
1061 error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
1062 error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
1063 error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
1064 error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
1065 error |= __put_user(0,name->release+__OLD_UTS_LEN);
1066 error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
1067 error |= __put_user(0,name->version+__OLD_UTS_LEN);
1068 error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
1069 error |= __put_user(0,name->machine+__OLD_UTS_LEN);
1070 if (personality(current->personality) == PER_LINUX32) {
1071 /* change "ppc64" to "ppc" */
1072 error |= __put_user(0, name->machine + 3);
1073 error |= __put_user(0, name->machine + 4);
1074 }
1075
1076 up_read(&uts_sem);
1077
1078 error = error ? -EFAULT : 0;
1079
1080 return error;
1081}
1082
1083unsigned long sys32_mmap2(unsigned long addr, size_t len,
1084 unsigned long prot, unsigned long flags,
1085 unsigned long fd, unsigned long pgoff)
1086{
1087 /* This should remain 12 even if PAGE_SIZE changes */
1088 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
1089}
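/*
 * Illustrative sketch (not part of the original file): mmap2 counts its
 * offset in fixed 4096-byte units, so a 32-bit pgoff can address byte
 * offsets up to 2^44 even though off_t itself is only 32 bits wide.
 */
static unsigned long demo_mmap2_byte_offset(unsigned long pgoff)
{
	return pgoff << 12;	/* 4096-byte units, independent of PAGE_SIZE */
}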
1090
1091int get_compat_timeval(struct timeval *tv, struct compat_timeval __user *ctv)
1092{
1093 return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
1094 __get_user(tv->tv_sec, &ctv->tv_sec) ||
1095 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
1096}
1097
1098asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs)
1099{
1100 struct timeval ktvs[2], *ptr;
1101
1102 ptr = NULL;
1103 if (tvs) {
1104 if (get_compat_timeval(&ktvs[0], &tvs[0]) ||
1105 get_compat_timeval(&ktvs[1], &tvs[1]))
1106 return -EFAULT;
1107 ptr = ktvs;
1108 }
1109
1110 return do_utimes(filename, ptr);
1111}
1112
1113long sys32_tgkill(u32 tgid, u32 pid, int sig)
1114{
1115 /* sign extend tgid, pid */
1116 return sys_tgkill((int)tgid, (int)pid, sig);
1117}
1118
1119/*
1120 * long long munging:
1121 * The 32 bit ABI passes long longs in an odd even register pair.
1122 */
1123
1124compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
1125 u32 reg6, u32 poshi, u32 poslo)
1126{
1127 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1128}
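/*
 * Illustrative sketch (not part of the original file): the odd/even
 * register-pair convention is why sys32_pread64 receives a padding
 * argument (reg6) before the two halves of the offset; the 64-bit
 * value is rebuilt like this:
 */
static loff_t demo_merge_halves(u32 hi, u32 lo)
{
	return ((loff_t)hi << 32) | lo;
}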
1129
1130compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
1131 u32 reg6, u32 poshi, u32 poslo)
1132{
1133 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1134}
1135
1136compat_ssize_t sys32_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
1137{
1138 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
1139}
1140
1141asmlinkage int sys32_truncate64(const char __user * path, u32 reg4,
1142 unsigned long high, unsigned long low)
1143{
1144 return sys_truncate(path, (high << 32) | low);
1145}
1146
1147asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
1148 unsigned long low)
1149{
1150 return sys_ftruncate(fd, (high << 32) | low);
1151}
1152
1153long ppc32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
1154 size_t len)
1155{
1156 return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
1157 buf, len);
1158}
1159
1160long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
1161 size_t len, int advice)
1162{
1163 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low, len,
1164 advice);
1165}
1166
1167long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
1168 u32 len_high, u32 len_low)
1169{
1170 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
1171 (u64)len_high << 32 | len_low, advice);
1172}
1173
1174long ppc32_timer_create(clockid_t clock,
1175 struct compat_sigevent __user *ev32,
1176 timer_t __user *timer_id)
1177{
1178 sigevent_t event;
1179 timer_t t;
1180 long err;
1181 mm_segment_t savefs;
1182
1183 if (ev32 == NULL)
1184 return sys_timer_create(clock, NULL, timer_id);
1185
1186 if (get_compat_sigevent(&event, ev32))
1187 return -EFAULT;
1188
1189 if (!access_ok(VERIFY_WRITE, timer_id, sizeof(timer_t)))
1190 return -EFAULT;
1191
1192 savefs = get_fs();
1193 set_fs(KERNEL_DS);
1194 /* The __user pointer casts are valid due to the set_fs() */
1195 err = sys_timer_create(clock,
1196 (sigevent_t __user *) &event,
1197 (timer_t __user *) &t);
1198 set_fs(savefs);
1199
1200 if (err == 0)
1201 err = __put_user(t, timer_id);
1202
1203 return err;
1204}
1205
1206asmlinkage long sys32_add_key(const char __user *_type,
1207 const char __user *_description,
1208 const void __user *_payload,
1209 u32 plen,
1210 u32 ringid)
1211{
1212 return sys_add_key(_type, _description, _payload, plen, ringid);
1213}
1214
1215asmlinkage long sys32_request_key(const char __user *_type,
1216 const char __user *_description,
1217 const char __user *_callout_info,
1218 u32 destringid)
1219{
1220 return sys_request_key(_type, _description, _callout_info, destringid);
1221}
1222
diff --git a/arch/ppc64/kernel/syscalls.c b/arch/ppc64/kernel/syscalls.c
deleted file mode 100644
index 05f16633bd2c..000000000000
--- a/arch/ppc64/kernel/syscalls.c
+++ /dev/null
@@ -1,263 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/sys_ppc.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/sys_i386.c"
8 * Adapted from the i386 version by Gary Thomas
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@cs.anu.edu.au).
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/PPC
14 * platform.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/syscalls.h>
26#include <linux/mm.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/sem.h>
30#include <linux/msg.h>
31#include <linux/shm.h>
32#include <linux/stat.h>
33#include <linux/mman.h>
34#include <linux/sys.h>
35#include <linux/ipc.h>
36#include <linux/utsname.h>
37#include <linux/file.h>
38#include <linux/init.h>
39#include <linux/personality.h>
40
41#include <asm/uaccess.h>
42#include <asm/ipc.h>
43#include <asm/semaphore.h>
44#include <asm/time.h>
45#include <asm/unistd.h>
46
47extern unsigned long wall_jiffies;
48
49
50/*
51 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
52 *
53 * This is really horribly ugly.
54 */
55asmlinkage int
56sys_ipc (uint call, int first, unsigned long second, long third,
57 void __user *ptr, long fifth)
58{
59 int version, ret;
60
61 version = call >> 16; /* hack for backward compatibility */
62 call &= 0xffff;
63
64 ret = -ENOSYS;
65 switch (call) {
66 case SEMOP:
67 ret = sys_semtimedop(first, (struct sembuf __user *)ptr,
68 (unsigned)second, NULL);
69 break;
70 case SEMTIMEDOP:
71 ret = sys_semtimedop(first, (struct sembuf __user *)ptr,
72 (unsigned)second,
73 (const struct timespec __user *) fifth);
74 break;
75 case SEMGET:
76 ret = sys_semget (first, (int)second, third);
77 break;
78 case SEMCTL: {
79 union semun fourth;
80
81 ret = -EINVAL;
82 if (!ptr)
83 break;
84 if ((ret = get_user(fourth.__pad, (void __user * __user *)ptr)))
85 break;
86 ret = sys_semctl(first, (int)second, third, fourth);
87 break;
88 }
89 case MSGSND:
90 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
91 (size_t)second, third);
92 break;
93 case MSGRCV:
94 switch (version) {
95 case 0: {
96 struct ipc_kludge tmp;
97
98 ret = -EINVAL;
99 if (!ptr)
100 break;
101 if ((ret = copy_from_user(&tmp,
102 (struct ipc_kludge __user *) ptr,
103 sizeof (tmp)) ? -EFAULT : 0))
104 break;
105 ret = sys_msgrcv(first, tmp.msgp, (size_t) second,
106 tmp.msgtyp, third);
107 break;
108 }
109 default:
110 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
111 (size_t)second, fifth, third);
112 break;
113 }
114 break;
115 case MSGGET:
116 ret = sys_msgget ((key_t)first, (int)second);
117 break;
118 case MSGCTL:
119 ret = sys_msgctl(first, (int)second,
120 (struct msqid_ds __user *)ptr);
121 break;
122 case SHMAT:
123 switch (version) {
124 default: {
125 ulong raddr;
126 ret = do_shmat(first, (char __user *) ptr,
127 (int)second, &raddr);
128 if (ret)
129 break;
130 ret = put_user (raddr, (ulong __user *) third);
131 break;
132 }
133 case 1: /* iBCS2 emulator entry point */
134 ret = -EINVAL;
135 if (!segment_eq(get_fs(), get_ds()))
136 break;
137 ret = do_shmat(first, (char __user *)ptr,
138 (int)second, (ulong *)third);
139 break;
140 }
141 break;
142 case SHMDT:
143 ret = sys_shmdt ((char __user *)ptr);
144 break;
145 case SHMGET:
146 ret = sys_shmget (first, (size_t)second, third);
147 break;
148 case SHMCTL:
149 ret = sys_shmctl(first, (int)second,
150 (struct shmid_ds __user *)ptr);
151 break;
152 }
153
154 return ret;
155}
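/*
 * Illustrative sketch (not part of the original file): how the packed
 * ipc() selector above is decoded. The low 16 bits pick the operation;
 * the high 16 bits carry the ABI version used for backward compatibility.
 */
static void demo_decode_ipc(unsigned int call)
{
	int version = call >> 16;		/* 1 selects the iBCS2 entry points */
	unsigned int op = call & 0xffff;	/* SEMOP, MSGSND, SHMAT, ... */
	(void)version;
	(void)op;
}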
156
157/*
158 * sys_pipe() is the normal C calling standard for creating
159 * a pipe. It's not the way unix traditionally does this, though.
160 */
161asmlinkage int sys_pipe(int __user *fildes)
162{
163 int fd[2];
164 int error;
165
166 error = do_pipe(fd);
167 if (!error) {
168 if (copy_to_user(fildes, fd, 2*sizeof(int)))
169 error = -EFAULT;
170 }
171
172 return error;
173}
174
175unsigned long sys_mmap(unsigned long addr, size_t len,
176 unsigned long prot, unsigned long flags,
177 unsigned long fd, off_t offset)
178{
179 struct file * file = NULL;
180 unsigned long ret = -EBADF;
181
182 if (!(flags & MAP_ANONYMOUS)) {
183 if (!(file = fget(fd)))
184 goto out;
185 }
186
187 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
188 down_write(&current->mm->mmap_sem);
189 ret = do_mmap(file, addr, len, prot, flags, offset);
190 up_write(&current->mm->mmap_sem);
191 if (file)
192 fput(file);
193
194out:
195 return ret;
196}
197
198long ppc64_personality(unsigned long personality)
199{
200 long ret;
201
202 if (personality(current->personality) == PER_LINUX32
203 && personality == PER_LINUX)
204 personality = PER_LINUX32;
205 ret = sys_personality(personality);
206 if (ret == PER_LINUX32)
207 ret = PER_LINUX;
208 return ret;
209}
210
211long ppc64_newuname(struct new_utsname __user * name)
212{
213 int err = 0;
214
215 down_read(&uts_sem);
216 if (copy_to_user(name, &system_utsname, sizeof(*name)))
217 err = -EFAULT;
218 up_read(&uts_sem);
219 if (!err && personality(current->personality) == PER_LINUX32) {
220 /* change ppc64 to ppc */
221 if (__put_user(0, name->machine + 3)
222 || __put_user(0, name->machine + 4))
223 err = -EFAULT;
224 }
225 return err;
226}
227
228asmlinkage time_t sys64_time(time_t __user * tloc)
229{
230 time_t secs;
231 time_t usecs;
232
233 long tb_delta = tb_ticks_since(tb_last_stamp);
234 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
235
236 secs = xtime.tv_sec;
237 usecs = (xtime.tv_nsec/1000) + tb_delta / tb_ticks_per_usec;
238 while (usecs >= USEC_PER_SEC) {
239 ++secs;
240 usecs -= USEC_PER_SEC;
241 }
242
243 if (tloc) {
244 if (put_user(secs,tloc))
245 secs = -EFAULT;
246 }
247
248 return secs;
249}
250
251void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
252 unsigned long r6, unsigned long r7, unsigned long r8,
253 struct pt_regs *regs)
254{
255 printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
256 " cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
257 current, smp_processor_id());
258}
259
260void do_show_syscall_exit(unsigned long r3)
261{
262 printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
263}
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
deleted file mode 100644
index b56c6a324e17..000000000000
--- a/arch/ppc64/kernel/time.c
+++ /dev/null
@@ -1,881 +0,0 @@
1/*
2 *
3 * Common time routines among all ppc machines.
4 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
6 * Paul Mackerras' version and mine for PReP and Pmac.
7 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
8 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
9 *
10 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
11 * to make clock more stable (2.4.0-test5). The only thing
12 * that this code assumes is that the timebases have been synchronized
13 * by firmware on SMP and are never stopped (never do sleep
14 * on SMP then, nap and doze are OK).
15 *
16 * Speeded up do_gettimeofday by getting rid of references to
17 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
18 *
19 * TODO (not necessarily in this file):
20 * - improve precision and reproducibility of timebase frequency
21 * measurement at boot time. (for iSeries, we calibrate the timebase
22 * against the Titan chip's clock.)
23 * - for astronomical applications: add a new function to get
24 * non ambiguous timestamps even around leap seconds. This needs
25 * a new timestamp format and a good name.
26 *
27 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
28 * "A Kernel Model for Precision Timekeeping" by Dave Mills
29 *
30 * This program is free software; you can redistribute it and/or
31 * modify it under the terms of the GNU General Public License
32 * as published by the Free Software Foundation; either version
33 * 2 of the License, or (at your option) any later version.
34 */
35
36#include <linux/config.h>
37#include <linux/errno.h>
38#include <linux/module.h>
39#include <linux/sched.h>
40#include <linux/kernel.h>
41#include <linux/param.h>
42#include <linux/string.h>
43#include <linux/mm.h>
44#include <linux/interrupt.h>
45#include <linux/timex.h>
46#include <linux/kernel_stat.h>
47#include <linux/mc146818rtc.h>
48#include <linux/time.h>
49#include <linux/init.h>
50#include <linux/profile.h>
51#include <linux/cpu.h>
52#include <linux/security.h>
53
54#include <asm/io.h>
55#include <asm/processor.h>
56#include <asm/nvram.h>
57#include <asm/cache.h>
58#include <asm/machdep.h>
59#ifdef CONFIG_PPC_ISERIES
60#include <asm/iSeries/ItLpQueue.h>
61#include <asm/iSeries/HvCallXm.h>
62#endif
63#include <asm/uaccess.h>
64#include <asm/time.h>
65#include <asm/ppcdebug.h>
66#include <asm/prom.h>
67#include <asm/sections.h>
68#include <asm/systemcfg.h>
69#include <asm/firmware.h>
70
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
72
73EXPORT_SYMBOL(jiffies_64);
74
75/* keep track of when we need to update the rtc */
76time_t last_rtc_update;
77extern int piranha_simulator;
78#ifdef CONFIG_PPC_ISERIES
79unsigned long iSeries_recal_titan = 0;
80unsigned long iSeries_recal_tb = 0;
81static unsigned long first_settimeofday = 1;
82#endif
83
84#define XSEC_PER_SEC (1024*1024)
85
86unsigned long tb_ticks_per_jiffy;
87unsigned long tb_ticks_per_usec = 100; /* sane default */
88EXPORT_SYMBOL(tb_ticks_per_usec);
89unsigned long tb_ticks_per_sec;
90unsigned long tb_to_xs;
91unsigned tb_to_us;
92unsigned long processor_freq;
93DEFINE_SPINLOCK(rtc_lock);
94EXPORT_SYMBOL_GPL(rtc_lock);
95
96unsigned long tb_to_ns_scale;
97unsigned long tb_to_ns_shift;
98
99struct gettimeofday_struct do_gtod;
100
101extern unsigned long wall_jiffies;
102extern int smp_tb_synchronized;
103
104extern struct timezone sys_tz;
105
106void ppc_adjtimex(void);
107
108static unsigned adjusting_time = 0;
109
110unsigned long ppc_proc_freq;
111unsigned long ppc_tb_freq;
112
113static __inline__ void timer_check_rtc(void)
114{
115 /*
116 * update the rtc when needed, this should be performed on the
117 * right fraction of a second. Half or full second ?
118 * Full second works on mk48t59 clocks, others need testing.
119 * Note that this update is basically only used through
120 * the adjtimex system calls. Setting the HW clock in
121 * any other way is a /dev/rtc and userland business.
122 * This is still wrong by -0.5/+1.5 jiffies because of the
123 * timer interrupt resolution and possible delay, but here we
124 * hit a quantization limit which can only be solved by higher
125 * resolution timers and decoupling time management from timer
126 * interrupts. This is also wrong on the clocks
127 * which require being written at the half second boundary.
128 * We should have an rtc call that only sets the minutes and
129 * seconds, like on Intel, to avoid problems with non-UTC clocks.
130 */
131 if (ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) {
135 struct rtc_time tm;
136 to_tm(xtime.tv_sec+1, &tm);
137 tm.tm_year -= 1900;
138 tm.tm_mon -= 1;
139 if (ppc_md.set_rtc_time(&tm) == 0)
140 last_rtc_update = xtime.tv_sec+1;
141 else
142 /* Try again one minute later */
143 last_rtc_update += 60;
144 }
145}
146
147/*
148 * This version of gettimeofday has microsecond resolution.
149 */
150static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
151{
152 unsigned long sec, usec, tb_ticks;
153 unsigned long xsec, tb_xsec;
154 struct gettimeofday_vars * temp_varp;
155 unsigned long temp_tb_to_xs, temp_stamp_xsec;
156
157 /*
158 * These calculations are faster (gets rid of divides)
159 * if done in units of 1/2^20 rather than microseconds.
160 * The conversion to microseconds at the end is done
161 * without a divide (and in fact, without a multiply)
162 */
163 temp_varp = do_gtod.varp;
164 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
165 temp_tb_to_xs = temp_varp->tb_to_xs;
166 temp_stamp_xsec = temp_varp->stamp_xsec;
167 tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs );
168 xsec = temp_stamp_xsec + tb_xsec;
169 sec = xsec / XSEC_PER_SEC;
170 xsec -= sec * XSEC_PER_SEC;
171 usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC;
172
173 tv->tv_sec = sec;
174 tv->tv_usec = usec;
175}
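/*
 * Illustrative sketch (not part of the original file): an "xsec" is
 * 1/2^20 of a second, so the final conversion to microseconds above is
 * just a multiply and a 20-bit shift -- no runtime divide is needed.
 */
static unsigned long demo_xsec_to_usec(unsigned long xsec)
{
	return (xsec * USEC_PER_SEC) >> 20;	/* >> 20 divides by XSEC_PER_SEC */
}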
176
177void do_gettimeofday(struct timeval *tv)
178{
179 __do_gettimeofday(tv, get_tb());
180}
181
182EXPORT_SYMBOL(do_gettimeofday);
183
184/* Synchronize xtime with do_gettimeofday */
185
186static inline void timer_sync_xtime(unsigned long cur_tb)
187{
188 struct timeval my_tv;
189
190 __do_gettimeofday(&my_tv, cur_tb);
191
192 if (xtime.tv_sec <= my_tv.tv_sec) {
193 xtime.tv_sec = my_tv.tv_sec;
194 xtime.tv_nsec = my_tv.tv_usec * 1000;
195 }
196}
197
198/*
199 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
200 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
201 * difference tb - tb_orig_stamp small enough to always fit inside a
203 * 32-bit number. This is a requirement of our fast 32-bit userland
204 * implementation in the vdso. If we "miss" a call to this function
205 * (interrupt latency, CPU locked in a spinlock, ...) and end up
206 * with too big a difference, the vdso will fall back to calling
207 * the syscall.
207 */
208static __inline__ void timer_recalc_offset(unsigned long cur_tb)
209{
210 struct gettimeofday_vars * temp_varp;
211 unsigned temp_idx;
212 unsigned long offset, new_stamp_xsec, new_tb_orig_stamp;
213
214 if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
215 return;
216
217 temp_idx = (do_gtod.var_idx == 0);
218 temp_varp = &do_gtod.vars[temp_idx];
219
220 new_tb_orig_stamp = cur_tb;
221 offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp;
222 new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);
223
224 temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
225 temp_varp->tb_orig_stamp = new_tb_orig_stamp;
226 temp_varp->stamp_xsec = new_stamp_xsec;
227 smp_mb();
228 do_gtod.varp = temp_varp;
229 do_gtod.var_idx = temp_idx;
230
231 ++(systemcfg->tb_update_count);
232 smp_wmb();
233 systemcfg->tb_orig_stamp = new_tb_orig_stamp;
234 systemcfg->stamp_xsec = new_stamp_xsec;
235 smp_wmb();
236 ++(systemcfg->tb_update_count);
237}
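/*
 * Illustrative sketch (not part of the original file): how a lock-free
 * reader pairs with the tb_update_count increments above, in the spirit
 * of the userland/vdso gettimeofday code. Two matching even reads of
 * the count bracket a consistent snapshot.
 */
static void demo_read_time_vars(unsigned long *tb_to_xs_out,
				unsigned long *stamp_xsec_out)
{
	unsigned long count;

	do {
		count = systemcfg->tb_update_count;
		smp_rmb();
		*tb_to_xs_out = systemcfg->tb_to_xs;
		*stamp_xsec_out = systemcfg->stamp_xsec;
		smp_rmb();
	} while ((count & 1) || count != systemcfg->tb_update_count);
}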
238
239#ifdef CONFIG_SMP
240unsigned long profile_pc(struct pt_regs *regs)
241{
242 unsigned long pc = instruction_pointer(regs);
243
244 if (in_lock_functions(pc))
245 return regs->link;
246
247 return pc;
248}
249EXPORT_SYMBOL(profile_pc);
250#endif
251
252#ifdef CONFIG_PPC_ISERIES
253
254/*
255 * This function recalibrates the timebase based on the 49-bit time-of-day
256 * value in the Titan chip. The Titan is much more accurate than the value
257 * returned by the service processor for the timebase frequency.
258 */
259
260static void iSeries_tb_recal(void)
261{
262 struct div_result divres;
263 unsigned long titan, tb;
264 tb = get_tb();
265 titan = HvCallXm_loadTod();
266 if ( iSeries_recal_titan ) {
267 unsigned long tb_ticks = tb - iSeries_recal_tb;
268 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
269 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
270 unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
271 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
272 char sign = '+';
273 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
274 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
275
276 if ( tick_diff < 0 ) {
277 tick_diff = -tick_diff;
278 sign = '-';
279 }
280 if ( tick_diff ) {
281 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
282 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
283 new_tb_ticks_per_jiffy, sign, tick_diff );
284 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
285 tb_ticks_per_sec = new_tb_ticks_per_sec;
286 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
287 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
288 tb_to_xs = divres.result_low;
289 do_gtod.varp->tb_to_xs = tb_to_xs;
290 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
291 systemcfg->tb_to_xs = tb_to_xs;
292 }
293 else {
294 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
295 " new tb_ticks_per_jiffy = %lu\n"
296 " old tb_ticks_per_jiffy = %lu\n",
297 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
298 }
299 }
300 }
301 iSeries_recal_titan = titan;
302 iSeries_recal_tb = tb;
303}
304#endif
305
306/*
307 * For iSeries shared processors, we have to let the hypervisor
308 * set the hardware decrementer. We set a virtual decrementer
309 * in the lppaca and call the hypervisor if the virtual
310 * decrementer is less than the current value in the hardware
311 * decrementer. (almost always the new decrementer value will
312 * be greater than the current hardware decementer so the hypervisor
313 * call will not be needed)
314 */
315
316unsigned long tb_last_stamp __cacheline_aligned_in_smp;
317
318/*
319 * timer_interrupt - gets called when the decrementer overflows,
320 * with interrupts disabled.
321 */
322int timer_interrupt(struct pt_regs * regs)
323{
324 int next_dec;
325 unsigned long cur_tb;
326 struct paca_struct *lpaca = get_paca();
327 unsigned long cpu = smp_processor_id();
328
329 irq_enter();
330
331 profile_tick(CPU_PROFILING, regs);
332
333 lpaca->lppaca.int_dword.fields.decr_int = 0;
334
335 while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
336 /*
337 * We cannot disable the decrementer, so in the period
338 * between this cpu's being marked offline in cpu_online_map
339 * and calling stop-self, it is taking timer interrupts.
340 * Avoid calling into the scheduler rebalancing code if this
341 * is the case.
342 */
343 if (!cpu_is_offline(cpu))
344 update_process_times(user_mode(regs));
345 /*
346 * No need to check whether cpu is offline here; boot_cpuid
347 * should have been fixed up by now.
348 */
349 if (cpu == boot_cpuid) {
350 write_seqlock(&xtime_lock);
351 tb_last_stamp = lpaca->next_jiffy_update_tb;
352 timer_recalc_offset(lpaca->next_jiffy_update_tb);
353 do_timer(regs);
354 timer_sync_xtime(lpaca->next_jiffy_update_tb);
355 timer_check_rtc();
356 write_sequnlock(&xtime_lock);
357 if ( adjusting_time && (time_adjust == 0) )
358 ppc_adjtimex();
359 }
360 lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
361 }
362
363 next_dec = lpaca->next_jiffy_update_tb - cur_tb;
364 if (next_dec > lpaca->default_decr)
365 next_dec = lpaca->default_decr;
366 set_dec(next_dec);
367
368#ifdef CONFIG_PPC_ISERIES
369 if (hvlpevent_is_pending())
370 process_hvlpevents(regs);
371#endif
372
373 /* collect purr register values often, for accurate calculations */
374 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
376 cu->current_tb = mfspr(SPRN_PURR);
377 }
378
379 irq_exit();
380
381 return 1;
382}
383
384/*
385 * Scheduler clock - returns current time in nanosec units.
386 *
387 * Note: mulhdu(a, b) (multiply high double unsigned) returns
388 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
389 * are 64-bit unsigned numbers.
390 */
391unsigned long long sched_clock(void)
392{
393 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
394}
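/*
 * Illustrative sketch (not part of the original file): a portable C
 * rendering of the mulhdu primitive described above, assuming a
 * compiler with __int128 support.
 */
static unsigned long demo_mulhdu(unsigned long a, unsigned long b)
{
	return (unsigned long)(((unsigned __int128)a * b) >> 64);
}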
395
396int do_settimeofday(struct timespec *tv)
397{
398 time_t wtm_sec, new_sec = tv->tv_sec;
399 long wtm_nsec, new_nsec = tv->tv_nsec;
400 unsigned long flags;
401 unsigned long delta_xsec;
402 long int tb_delta;
403 unsigned long new_xsec;
404
405 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
406 return -EINVAL;
407
408 write_seqlock_irqsave(&xtime_lock, flags);
409 /* Updating the RTC is not the job of this code. If the time is
410 * stepped under NTP, the RTC will be updated after STA_UNSYNC
411 * is cleared. Tools like clock/hwclock either copy the RTC
412 * to the system time, in which case there is no point in writing
413 * to the RTC again, or write to the RTC but then they don't call
414 * settimeofday to perform this operation.
415 */
416#ifdef CONFIG_PPC_ISERIES
417 if ( first_settimeofday ) {
418 iSeries_tb_recal();
419 first_settimeofday = 0;
420 }
421#endif
422 tb_delta = tb_ticks_since(tb_last_stamp);
423 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
424
425 new_nsec -= tb_delta / tb_ticks_per_usec / 1000;
426
427 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
428 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
429
430 set_normalized_timespec(&xtime, new_sec, new_nsec);
431 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
432
433 /* In case of a large backwards jump in time with NTP, we want the
434 * clock to be updated as soon as the PLL is again in lock.
435 */
436 last_rtc_update = new_sec - 658;
437
438 ntp_clear();
439
440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
441 do_gtod.varp->tb_to_xs );
442
443 new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC;
444 new_xsec += new_sec * XSEC_PER_SEC;
445 if ( new_xsec > delta_xsec ) {
446 do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
447 systemcfg->stamp_xsec = new_xsec - delta_xsec;
448 }
449 else {
450 /* This is only for the case where the user is setting the time
451 * way back to a time such that the boot time would have been
452 * before 1970 ... e.g. we booted ten days ago, and we are setting
453 * the time to Jan 5, 1970 */
454 do_gtod.varp->stamp_xsec = new_xsec;
455 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
456 systemcfg->stamp_xsec = new_xsec;
457 systemcfg->tb_orig_stamp = tb_last_stamp;
458 }
459
460 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
461 systemcfg->tz_dsttime = sys_tz.tz_dsttime;
462
463 write_sequnlock_irqrestore(&xtime_lock, flags);
464 clock_was_set();
465 return 0;
466}
467
468EXPORT_SYMBOL(do_settimeofday);
469
470#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
471void __init generic_calibrate_decr(void)
472{
473 struct device_node *cpu;
474 struct div_result divres;
475 unsigned int *fp;
476 int node_found;
477
478 /*
479 * The cpu node should have a timebase-frequency property
480 * to tell us the rate at which the decrementer counts.
481 */
482 cpu = of_find_node_by_type(NULL, "cpu");
483
484 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
485 node_found = 0;
486 if (cpu != 0) {
487 fp = (unsigned int *)get_property(cpu, "timebase-frequency",
488 NULL);
489 if (fp != 0) {
490 node_found = 1;
491 ppc_tb_freq = *fp;
492 }
493 }
494 if (!node_found)
495 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
496 "(not found)\n");
497
498 ppc_proc_freq = DEFAULT_PROC_FREQ;
499 node_found = 0;
500 if (cpu != 0) {
501 fp = (unsigned int *)get_property(cpu, "clock-frequency",
502 NULL);
503 if (fp != 0) {
504 node_found = 1;
505 ppc_proc_freq = *fp;
506 }
507 }
508 if (!node_found)
509 printk(KERN_ERR "WARNING: Estimating processor frequency "
510 "(not found)\n");
511
512 of_node_put(cpu);
513
514 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
515 ppc_tb_freq/1000000, ppc_tb_freq%1000000);
516 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
517 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
518
519 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
520 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
521 tb_ticks_per_usec = ppc_tb_freq / 1000000;
522 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
523 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
524 tb_to_xs = divres.result_low;
525
526 setup_default_decr();
527}
528#endif
529
530void __init time_init(void)
531{
532 /* This function is only called on the boot processor */
533 unsigned long flags;
534 struct rtc_time tm;
535 struct div_result res;
536 unsigned long scale, shift;
537
538 ppc_md.calibrate_decr();
539
540 /*
541 * Compute scale factor for sched_clock.
542 * The calibrate_decr() function has set tb_ticks_per_sec,
543 * which is the timebase frequency.
544 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
545 * the 128-bit result as a 64.64 fixed-point number.
546 * We then shift that number right until it is less than 1.0,
547 * giving us the scale factor and shift count to use in
548 * sched_clock().
549 */
550 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
551 scale = res.result_low;
552 for (shift = 0; res.result_high != 0; ++shift) {
553 scale = (scale >> 1) | (res.result_high << 63);
554 res.result_high >>= 1;
555 }
556 tb_to_ns_scale = scale;
557 tb_to_ns_shift = shift;
558
559#ifdef CONFIG_PPC_ISERIES
560 if (!piranha_simulator)
561#endif
562 ppc_md.get_boot_time(&tm);
563
564 write_seqlock_irqsave(&xtime_lock, flags);
565 xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
566 tm.tm_hour, tm.tm_min, tm.tm_sec);
567 tb_last_stamp = get_tb();
568 do_gtod.varp = &do_gtod.vars[0];
569 do_gtod.var_idx = 0;
570 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
571 get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy;
572 do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
573 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
574 do_gtod.varp->tb_to_xs = tb_to_xs;
575 do_gtod.tb_to_us = tb_to_us;
576 systemcfg->tb_orig_stamp = tb_last_stamp;
577 systemcfg->tb_update_count = 0;
578 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
579 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
580 systemcfg->tb_to_xs = tb_to_xs;
581
582 time_freq = 0;
583
584 xtime.tv_nsec = 0;
585 last_rtc_update = xtime.tv_sec;
586 set_normalized_timespec(&wall_to_monotonic,
587 -xtime.tv_sec, -xtime.tv_nsec);
588 write_sequnlock_irqrestore(&xtime_lock, flags);
589
590 /* Not exact, but the timer interrupt takes care of this */
591 set_dec(tb_ticks_per_jiffy);
592}
593
594/*
595 * After adjtimex is called, adjust the conversion of tb ticks
596 * to microseconds to keep do_gettimeofday synchronized
597 * with ntpd.
598 *
599 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
600 * adjust the frequency.
601 */
602
603/* #define DEBUG_PPC_ADJTIMEX 1 */
604
605void ppc_adjtimex(void)
606{
607 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec;
608 unsigned long tb_ticks_per_sec_delta;
609 long delta_freq, ltemp;
610 struct div_result divres;
611 unsigned long flags;
612 struct gettimeofday_vars * temp_varp;
613 unsigned temp_idx;
614 long singleshot_ppm = 0;
615
616 /* Compute the parts-per-million frequency adjustment that accomplishes the time adjustment
617 implied by time_offset when applied over the elapsed time indicated by time_constant.
618 Use SHIFT_USEC to get it into the same units as time_freq. */
619 if ( time_offset < 0 ) {
620 ltemp = -time_offset;
621 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
622 ltemp >>= SHIFT_KG + time_constant;
623 ltemp = -ltemp;
624 }
625 else {
626 ltemp = time_offset;
627 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
628 ltemp >>= SHIFT_KG + time_constant;
629 }
630
631 /* If there is a single shot time adjustment in progress */
632 if ( time_adjust ) {
633#ifdef DEBUG_PPC_ADJTIMEX
634 printk("ppc_adjtimex: ");
635 if ( adjusting_time == 0 )
636 printk("starting ");
637 printk("single shot time_adjust = %ld\n", time_adjust);
638#endif
639
640 adjusting_time = 1;
641
642 /* Compute parts per million frequency adjustment to match time_adjust */
643 singleshot_ppm = tickadj * HZ;
644 /*
645 * The adjustment should be tickadj*HZ to match the code in
646 * linux/kernel/timer.c, but experiments show that this is too
647 * large. 3/4 of tickadj*HZ seems about right
648 */
649 singleshot_ppm -= singleshot_ppm / 4;
650 /* Use SHIFT_USEC to get it into the same units as time_freq */
651 singleshot_ppm <<= SHIFT_USEC;
652 if ( time_adjust < 0 )
653 singleshot_ppm = -singleshot_ppm;
654 }
655 else {
656#ifdef DEBUG_PPC_ADJTIMEX
657 if ( adjusting_time )
658 printk("ppc_adjtimex: ending single shot time_adjust\n");
659#endif
660 adjusting_time = 0;
661 }
662
663 /* Add up all of the frequency adjustments */
664 delta_freq = time_freq + ltemp + singleshot_ppm;
665
666 /* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */
667 den = 1000000 * (1 << (SHIFT_USEC - 8));
668 if ( delta_freq < 0 ) {
669 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
670 new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
671 }
672 else {
673 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
674 new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
675 }
676
677#ifdef DEBUG_PPC_ADJTIMEX
678 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
679 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
680#endif
681
682 /* Compute a new value of tb_to_xs (used to convert tb ticks to 1/2^20-second units) and a new value of
683 stamp_xsec which is the time (in 1/2^20 second units) corresponding to tb_orig_stamp. This
684 new value of stamp_xsec compensates for the change in frequency (implied by the new tb_to_xs)
685 which guarantees that the current time remains the same */
686 write_seqlock_irqsave( &xtime_lock, flags );
687 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
688 div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres );
689 new_tb_to_xs = divres.result_low;
690 new_xsec = mulhdu( tb_ticks, new_tb_to_xs );
691
692 old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs );
693 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
694
695 /* There are two copies of tb_to_xs and stamp_xsec so that no lock is needed to access and use these
696 values in do_gettimeofday. We alternate the copies and as long as a reasonable time elapses between
697 changes, there will never be inconsistent values. ntpd has a minimum of one minute between updates */
698
699 temp_idx = (do_gtod.var_idx == 0);
700 temp_varp = &do_gtod.vars[temp_idx];
701
702 temp_varp->tb_to_xs = new_tb_to_xs;
703 temp_varp->stamp_xsec = new_stamp_xsec;
704 temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
705 smp_mb();
706 do_gtod.varp = temp_varp;
707 do_gtod.var_idx = temp_idx;
708
709 /*
710 * tb_update_count is used to allow the problem state gettimeofday code
711 * to assure itself that it sees a consistent view of the tb_to_xs and
712 * stamp_xsec variables. It reads the tb_update_count, then reads
713 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
714 * the two values of tb_update_count match and are even then the
715 * tb_to_xs and stamp_xsec values are consistent. If not, then it
716 * loops back and reads them again until this criterion is met.
717 */
718 ++(systemcfg->tb_update_count);
719 smp_wmb();
720 systemcfg->tb_to_xs = new_tb_to_xs;
721 systemcfg->stamp_xsec = new_stamp_xsec;
722 smp_wmb();
723 ++(systemcfg->tb_update_count);
724
725 write_sequnlock_irqrestore( &xtime_lock, flags );
726
727}
728
729
730#define TICK_SIZE tick
731#define FEBRUARY 2
732#define STARTOFTIME 1970
733#define SECDAY 86400L
734#define SECYR (SECDAY * 365)
735#define leapyear(year) ((year) % 4 == 0)
736#define days_in_year(a) (leapyear(a) ? 366 : 365)
737#define days_in_month(a) (month_days[(a) - 1])
738
739static int month_days[12] = {
740 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
741};
742
743/*
744 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
745 */
746void GregorianDay(struct rtc_time * tm)
747{
748 int leapsToDate;
749 int lastYear;
750 int day;
751 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
752
753 lastYear=tm->tm_year-1;
754
755 /*
756 * Number of leap corrections to apply up to end of last year
757 */
758 leapsToDate = lastYear/4 - lastYear/100 + lastYear/400;
759
760 /*
761 * This year is a leap year if it is divisible by 4 except when it is
762 * divisible by 100 unless it is divisible by 400
763 *
764 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be
765 */
766 if((tm->tm_year%4==0) &&
767 ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
768 (tm->tm_mon>2))
769 {
770 /*
771 * We are past Feb. 29 in a leap year
772 */
773 day=1;
774 }
775 else
776 {
777 day=0;
778 }
779
780 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
781 tm->tm_mday;
782
783 tm->tm_wday=day%7;
784}
785
786void to_tm(int tim, struct rtc_time * tm)
787{
788 register int i;
789 register long hms, day;
790
791 day = tim / SECDAY;
792 hms = tim % SECDAY;
793
794 /* Hours, minutes, seconds are easy */
795 tm->tm_hour = hms / 3600;
796 tm->tm_min = (hms % 3600) / 60;
797 tm->tm_sec = (hms % 3600) % 60;
798
799 /* Number of years in days */
800 for (i = STARTOFTIME; day >= days_in_year(i); i++)
801 day -= days_in_year(i);
802 tm->tm_year = i;
803
804 /* Number of months in days left */
805 if (leapyear(tm->tm_year))
806 days_in_month(FEBRUARY) = 29;
807 for (i = 1; day >= days_in_month(i); i++)
808 day -= days_in_month(i);
809 days_in_month(FEBRUARY) = 28;
810 tm->tm_mon = i;
811
812 /* Days are what is left over (+1) from all that. */
813 tm->tm_mday = day + 1;
814
815 /*
816 * Determine the day of week
817 */
818 GregorianDay(tm);
819}
820
821/* Auxiliary function to compute scaling factors */
822/* Actually the choice of a timebase running at 1/4 of the bus
823 * frequency, giving a resolution of a few tens of nanoseconds, is quite nice.
824 * It makes this computation very precise (27-28 bits typically), which
825 * is optimistic considering the stability of most processor clock
826 * oscillators and the precision with which the timebase frequency
827 * is measured, but it does no harm.
828 */
829unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
830 unsigned mlt=0, tmp, err;
831 /* No concern for performance, it's done once: use a stupid
832 * but safe and compact method to find the multiplier.
833 */
834
835 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
836 if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp;
837 }
838
839 /* We might still be off by 1 for the best approximation.
840 * A side effect of this is that if outscale is too large
841 * the returned value will be zero.
842 * Many corner cases have been checked and seem to work,
843 * some might have been forgotten in the test however.
844 */
845
846 err = inscale*(mlt+1);
847 if (err <= inscale/2) mlt++;
848 return mlt;
849 }
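/*
 * Illustrative sketch (not part of the original file): typical use of
 * the scale factor, matching the generic_calibrate_decr() call earlier
 * in this file.
 */
static unsigned demo_tb_to_us_factor(unsigned tb_freq_hz)
{
	/* afterwards, usecs == mulhwu(tb_ticks, mlt) == (ticks * mlt) >> 32 */
	return mulhwu_scale_factor(tb_freq_hz, 1000000);
}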
850
851/*
852 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
853 * result.
854 */
855
856void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
857 unsigned divisor, struct div_result *dr )
858{
859 unsigned long a,b,c,d, w,x,y,z, ra,rb,rc;
860
861 a = dividend_high >> 32;
862 b = dividend_high & 0xffffffff;
863 c = dividend_low >> 32;
864 d = dividend_low & 0xffffffff;
865
866 w = a/divisor;
867 ra = (a - (w * divisor)) << 32;
868
869 x = (ra + b)/divisor;
870 rb = ((ra + b) - (x * divisor)) << 32;
871
872 y = (rb + c)/divisor;
873 rc = ((rb + c) - (y * divisor)) << 32;
874
875 z = (rc + d)/divisor;
876
877 dr->result_high = (w << 32) + x;
878 dr->result_low = (y << 32) + z;
879
880}
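/*
 * Illustrative sketch (not part of the original file): how the callers
 * in this file derive tb_to_xs, the 0.64 fixed-point factor converting
 * timebase ticks to 1/2^20-second units.
 */
static unsigned long demo_tb_to_xs(unsigned long ticks_per_sec)
{
	struct div_result divres;

	/* (2^20 * 2^64) / ticks_per_sec; the low 64 result bits are the factor */
	div128_by_32(1024 * 1024, 0, ticks_per_sec, &divres);
	return divres.result_low;
}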
881
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
deleted file mode 100644
index 7467ae508e6e..000000000000
--- a/arch/ppc64/kernel/traps.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/traps.c
3 *
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Modified by Cort Dougan (cort@cs.nmt.edu)
12 * and Paul Mackerras (paulus@cs.anu.edu.au)
13 */
14
15/*
16 * This file handles the architecture-dependent parts of hardware exceptions
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/stddef.h>
25#include <linux/unistd.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/interrupt.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34#include <asm/kdebug.h>
35
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/processor.h>
41#include <asm/ppcdebug.h>
42#include <asm/rtas.h>
43#include <asm/systemcfg.h>
44#include <asm/machdep.h>
45#include <asm/pmc.h>
46
47#ifdef CONFIG_DEBUGGER
48int (*__debugger)(struct pt_regs *regs);
49int (*__debugger_ipi)(struct pt_regs *regs);
50int (*__debugger_bpt)(struct pt_regs *regs);
51int (*__debugger_sstep)(struct pt_regs *regs);
52int (*__debugger_iabr_match)(struct pt_regs *regs);
53int (*__debugger_dabr_match)(struct pt_regs *regs);
54int (*__debugger_fault_handler)(struct pt_regs *regs);
55
56EXPORT_SYMBOL(__debugger);
57EXPORT_SYMBOL(__debugger_ipi);
58EXPORT_SYMBOL(__debugger_bpt);
59EXPORT_SYMBOL(__debugger_sstep);
60EXPORT_SYMBOL(__debugger_iabr_match);
61EXPORT_SYMBOL(__debugger_dabr_match);
62EXPORT_SYMBOL(__debugger_fault_handler);
63#endif
64
65struct notifier_block *ppc64_die_chain;
66static DEFINE_SPINLOCK(die_notifier_lock);
67
68int register_die_notifier(struct notifier_block *nb)
69{
70 int err = 0;
71 unsigned long flags;
72
73 spin_lock_irqsave(&die_notifier_lock, flags);
74 err = notifier_chain_register(&ppc64_die_chain, nb);
75 spin_unlock_irqrestore(&die_notifier_lock, flags);
76 return err;
77}
78
79/*
80 * Trap & Exception support
81 */
82
83static DEFINE_SPINLOCK(die_lock);
84
85int die(const char *str, struct pt_regs *regs, long err)
86{
87 static int die_counter;
88 int nl = 0;
89
90 if (debugger(regs))
91 return 1;
92
93 console_verbose();
94 spin_lock_irq(&die_lock);
95 bust_spinlocks(1);
96 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
97#ifdef CONFIG_PREEMPT
98 printk("PREEMPT ");
99 nl = 1;
100#endif
101#ifdef CONFIG_SMP
102 printk("SMP NR_CPUS=%d ", NR_CPUS);
103 nl = 1;
104#endif
105#ifdef CONFIG_DEBUG_PAGEALLOC
106 printk("DEBUG_PAGEALLOC ");
107 nl = 1;
108#endif
109#ifdef CONFIG_NUMA
110 printk("NUMA ");
111 nl = 1;
112#endif
113 switch(systemcfg->platform) {
114 case PLATFORM_PSERIES:
115 printk("PSERIES ");
116 nl = 1;
117 break;
118 case PLATFORM_PSERIES_LPAR:
119 printk("PSERIES LPAR ");
120 nl = 1;
121 break;
122 case PLATFORM_ISERIES_LPAR:
123 printk("ISERIES LPAR ");
124 nl = 1;
125 break;
126 case PLATFORM_POWERMAC:
127 printk("POWERMAC ");
128 nl = 1;
129 break;
130 case PLATFORM_BPA:
131 printk("BPA ");
132 nl = 1;
133 break;
134 }
135 if (nl)
136 printk("\n");
137 print_modules();
138 show_regs(regs);
139 bust_spinlocks(0);
140 spin_unlock_irq(&die_lock);
141
142 if (in_interrupt())
143 panic("Fatal exception in interrupt");
144
145 if (panic_on_oops) {
146 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
147 ssleep(5);
148 panic("Fatal exception");
149 }
150 do_exit(SIGSEGV);
151
152 return 0;
153}
154
155void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
156{
157 siginfo_t info;
158
159 if (!user_mode(regs)) {
160 if (die("Exception in kernel mode", regs, signr))
161 return;
162 }
163
164 memset(&info, 0, sizeof(info));
165 info.si_signo = signr;
166 info.si_code = code;
167 info.si_addr = (void __user *) addr;
168 force_sig_info(signr, &info, current);
169}
170
171void system_reset_exception(struct pt_regs *regs)
172{
173	/* See if a machine dependent handler wants to run first */
174 if (ppc_md.system_reset_exception)
175 ppc_md.system_reset_exception(regs);
176
177 die("System Reset", regs, 0);
178
179 /* Must die if the interrupt is not recoverable */
180 if (!(regs->msr & MSR_RI))
181 panic("Unrecoverable System Reset");
182
183 /* What should we do here? We could issue a shutdown or hard reset. */
184}
185
186void machine_check_exception(struct pt_regs *regs)
187{
188 int recover = 0;
189
190	/* See if a machine dependent handler can recover from this */
191 if (ppc_md.machine_check_exception)
192 recover = ppc_md.machine_check_exception(regs);
193
194 if (recover)
195 return;
196
197 if (debugger_fault_handler(regs))
198 return;
199 die("Machine check", regs, 0);
200
201 /* Must die if the interrupt is not recoverable */
202 if (!(regs->msr & MSR_RI))
203 panic("Unrecoverable Machine check");
204}
205
206void unknown_exception(struct pt_regs *regs)
207{
208 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
209 regs->nip, regs->msr, regs->trap);
210
211 _exception(SIGTRAP, regs, 0, 0);
212}
213
214void instruction_breakpoint_exception(struct pt_regs *regs)
215{
216 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
217 5, SIGTRAP) == NOTIFY_STOP)
218 return;
219 if (debugger_iabr_match(regs))
220 return;
221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
222}
223
224void __kprobes single_step_exception(struct pt_regs *regs)
225{
226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
227
228 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
229 5, SIGTRAP) == NOTIFY_STOP)
230 return;
231 if (debugger_sstep(regs))
232 return;
233
234 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
235}
236
237/*
238 * After we have successfully emulated an instruction, we have to
239 * check if the instruction was being single-stepped, and if so,
240 * pretend we got a single-step exception. This was pointed out
241 * by Kumar Gala. -- paulus
242 */
243static inline void emulate_single_step(struct pt_regs *regs)
244{
245 if (regs->msr & MSR_SE)
246 single_step_exception(regs);
247}
248
249static void parse_fpe(struct pt_regs *regs)
250{
251 int code = 0;
252 unsigned long fpscr;
253
254 flush_fp_to_thread(current);
255
256 fpscr = current->thread.fpscr;
257
258 /* Invalid operation */
259 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
260 code = FPE_FLTINV;
261
262 /* Overflow */
263 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
264 code = FPE_FLTOVF;
265
266 /* Underflow */
267 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
268 code = FPE_FLTUND;
269
270 /* Divide by zero */
271 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
272 code = FPE_FLTDIV;
273
274 /* Inexact result */
275 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
276 code = FPE_FLTRES;
277
278 _exception(SIGFPE, regs, code, regs->nip);
279}
280
281/*
282 * Illegal instruction emulation support. Return non-zero if we can't
283 * emulate, or -EFAULT if the associated memory access caused an access
284 * fault. Return zero on success.
285 */
286
287#define INST_MFSPR_PVR 0x7c1f42a6
288#define INST_MFSPR_PVR_MASK 0xfc1fffff
289
290#define INST_DCBA 0x7c0005ec
291#define INST_DCBA_MASK 0x7c0007fe
292
293#define INST_MCRXR 0x7c000400
294#define INST_MCRXR_MASK 0x7c0007fe
295
296static int emulate_instruction(struct pt_regs *regs)
297{
298 unsigned int instword;
299
300 if (!user_mode(regs))
301 return -EINVAL;
302
303 CHECK_FULL_REGS(regs);
304
305 if (get_user(instword, (unsigned int __user *)(regs->nip)))
306 return -EFAULT;
307
308 /* Emulate the mfspr rD, PVR. */
309 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
310 unsigned int rd;
311
312 rd = (instword >> 21) & 0x1f;
313 regs->gpr[rd] = mfspr(SPRN_PVR);
314 return 0;
315 }
316
317 /* Emulating the dcba insn is just a no-op. */
318 if ((instword & INST_DCBA_MASK) == INST_DCBA) {
319 static int warned;
320
321 if (!warned) {
322 printk(KERN_WARNING
323 "process %d (%s) uses obsolete 'dcba' insn\n",
324 current->pid, current->comm);
325 warned = 1;
326 }
327 return 0;
328 }
329
330 /* Emulate the mcrxr insn. */
331 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
332 static int warned;
333 unsigned int shift;
334
335 if (!warned) {
336 printk(KERN_WARNING
337 "process %d (%s) uses obsolete 'mcrxr' insn\n",
338 current->pid, current->comm);
339 warned = 1;
340 }
341
342 shift = (instword >> 21) & 0x1c;
343 regs->ccr &= ~(0xf0000000 >> shift);
344 regs->ccr |= (regs->xer & 0xf0000000) >> shift;
345 regs->xer &= ~0xf0000000;
346 return 0;
347 }
348
349 return -EINVAL;
350}
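/*
 * Worked example (illustrative, using the standard PowerPC encoding):
 * "mfspr r5,PVR" assembles to 0x7cbf42a6. ANDing with
 * INST_MFSPR_PVR_MASK clears the rD field in bits 21-25, leaving
 * 0x7c1f42a6 == INST_MFSPR_PVR, and (0x7cbf42a6 >> 21) & 0x1f == 5,
 * so emulate_instruction() above loads the PVR into r5.
 */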
351
352/*
353 * Look through the list of trap instructions that are used for BUG(),
354 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
355 * that the exception was caused by a trap instruction of some kind.
356 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
357 * otherwise.
358 */
359extern struct bug_entry __start___bug_table[], __stop___bug_table[];
360
361#ifndef CONFIG_MODULES
362#define module_find_bug(x) NULL
363#endif
364
365struct bug_entry *find_bug(unsigned long bugaddr)
366{
367 struct bug_entry *bug;
368
369 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
370 if (bugaddr == bug->bug_addr)
371 return bug;
372 return module_find_bug(bugaddr);
373}
374
375static int
376check_bug_trap(struct pt_regs *regs)
377{
378 struct bug_entry *bug;
379 unsigned long addr;
380
381 if (regs->msr & MSR_PR)
382 return 0; /* not in kernel */
383 addr = regs->nip; /* address of trap instruction */
384 if (addr < PAGE_OFFSET)
385 return 0;
386 bug = find_bug(regs->nip);
387 if (bug == NULL)
388 return 0;
389 if (bug->line & BUG_WARNING_TRAP) {
390 /* this is a WARN_ON rather than BUG/BUG_ON */
391 printk(KERN_ERR "Badness in %s at %s:%d\n",
392 bug->function, bug->file,
393 (unsigned int)bug->line & ~BUG_WARNING_TRAP);
394 show_stack(current, (void *)regs->gpr[1]);
395 return 1;
396 }
397 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
398 bug->function, bug->file, (unsigned int)bug->line);
399 return 0;
400}
401
402void __kprobes program_check_exception(struct pt_regs *regs)
403{
404 if (debugger_fault_handler(regs))
405 return;
406
407 if (regs->msr & 0x100000) {
408 /* IEEE FP exception */
409 parse_fpe(regs);
410 } else if (regs->msr & 0x20000) {
411 /* trap exception */
412
413 if (notify_die(DIE_BPT, "breakpoint", regs, 5,
414 5, SIGTRAP) == NOTIFY_STOP)
415 return;
416 if (debugger_bpt(regs))
417 return;
418
419 if (check_bug_trap(regs)) {
420 regs->nip += 4;
421 return;
422 }
423 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
424
425 } else {
426 /* Privileged or illegal instruction; try to emulate it. */
427 switch (emulate_instruction(regs)) {
428 case 0:
429 regs->nip += 4;
430 emulate_single_step(regs);
431 break;
432
433 case -EFAULT:
434 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
435 break;
436
437 default:
438 if (regs->msr & 0x40000)
439	/* privileged */
440 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
441 else
442 /* illegal */
443 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
444 break;
445 }
446 }
447}
448
449void kernel_fp_unavailable_exception(struct pt_regs *regs)
450{
451 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
452 "%lx at %lx\n", regs->trap, regs->nip);
453 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
454}
455
456void altivec_unavailable_exception(struct pt_regs *regs)
457{
458 if (user_mode(regs)) {
459 /* A user program has executed an altivec instruction,
460 but this kernel doesn't support altivec. */
461 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
462 return;
463 }
464 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
465 "%lx at %lx\n", regs->trap, regs->nip);
466 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
467}
468
469extern perf_irq_t perf_irq;
470
471void performance_monitor_exception(struct pt_regs *regs)
472{
473 perf_irq(regs);
474}
475
476void alignment_exception(struct pt_regs *regs)
477{
478 int fixed;
479
480 fixed = fix_alignment(regs);
481
482 if (fixed == 1) {
483 regs->nip += 4; /* skip over emulated instruction */
484 emulate_single_step(regs);
485 return;
486 }
487
488 /* Operand address was bad */
489 if (fixed == -EFAULT) {
490 if (user_mode(regs)) {
491 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->dar);
492 } else {
493 /* Search exception table */
494 bad_page_fault(regs, regs->dar, SIGSEGV);
495 }
496
497 return;
498 }
499
500 _exception(SIGBUS, regs, BUS_ADRALN, regs->nip);
501}
502
503#ifdef CONFIG_ALTIVEC
504void altivec_assist_exception(struct pt_regs *regs)
505{
506 int err;
507 siginfo_t info;
508
509 if (!user_mode(regs)) {
510 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
511 " at %lx\n", regs->nip);
512 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
513 }
514
515 flush_altivec_to_thread(current);
516
517 err = emulate_altivec(regs);
518 if (err == 0) {
519 regs->nip += 4; /* skip emulated instruction */
520 emulate_single_step(regs);
521 return;
522 }
523
524 if (err == -EFAULT) {
525 /* got an error reading the instruction */
526 info.si_signo = SIGSEGV;
527 info.si_errno = 0;
528 info.si_code = SEGV_MAPERR;
529 info.si_addr = (void __user *) regs->nip;
530 force_sig_info(SIGSEGV, &info, current);
531 } else {
532 /* didn't recognize the instruction */
533 /* XXX quick hack for now: set the non-Java bit in the VSCR */
534 if (printk_ratelimit())
535 printk(KERN_ERR "Unrecognized altivec instruction "
536 "in %s at %lx\n", current->comm, regs->nip);
537 current->thread.vscr.u[3] |= 0x10000;
538 }
539}
540#endif /* CONFIG_ALTIVEC */
541
542/*
543 * We enter here if we get an unrecoverable exception, that is, one
544 * that happened at a point where the RI (recoverable interrupt) bit
545 * in the MSR is 0. This indicates that SRR0/1 are live, and that
546 * we therefore lost state by taking this exception.
547 */
548void unrecoverable_exception(struct pt_regs *regs)
549{
550 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
551 regs->trap, regs->nip);
552 die("Unrecoverable exception", regs, SIGABRT);
553}
554
555/*
556 * We enter here if we discover during exception entry that we are
557 * running in supervisor mode with a userspace value in the stack pointer.
558 */
559void kernel_bad_stack(struct pt_regs *regs)
560{
561 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
562 regs->gpr[1], regs->nip);
563 die("Bad kernel stack pointer", regs, SIGABRT);
564}
565
566void __init trap_init(void)
567{
568}
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/ppc64/kernel/u3_iommu.c
deleted file mode 100644
index 41ea09cb9ac7..000000000000
--- a/arch/ppc64/kernel/u3_iommu.c
+++ /dev/null
@@ -1,349 +0,0 @@
1/*
2 * arch/ppc64/kernel/u3_iommu.c
3 *
4 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
5 *
6 * Based on pSeries_iommu.c:
7 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
8 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
9 *
10 * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/types.h>
31#include <linux/slab.h>
32#include <linux/mm.h>
33#include <linux/spinlock.h>
34#include <linux/string.h>
35#include <linux/pci.h>
36#include <linux/dma-mapping.h>
37#include <linux/vmalloc.h>
38#include <asm/io.h>
39#include <asm/prom.h>
40#include <asm/ppcdebug.h>
41#include <asm/iommu.h>
42#include <asm/pci-bridge.h>
43#include <asm/machdep.h>
44#include <asm/abs_addr.h>
45#include <asm/cacheflush.h>
46#include <asm/lmb.h>
47
48#include "pci.h"
49
50extern int iommu_force_on;
51
52/* physical base of DART registers */
53#define DART_BASE 0xf8033000UL
54
55/* Offset from base to control register */
56#define DARTCNTL 0
57/* Offset from base to exception register */
58#define DARTEXCP 0x10
59/* Offset from base to TLB tag registers */
60#define DARTTAG 0x1000
61
62
63/* Control Register fields */
64
65/* base address of table (pfn) */
66#define DARTCNTL_BASE_MASK 0xfffff
67#define DARTCNTL_BASE_SHIFT 12
68
69#define DARTCNTL_FLUSHTLB 0x400
70#define DARTCNTL_ENABLE 0x200
71
72/* size of table in pages */
73#define DARTCNTL_SIZE_MASK 0x1ff
74#define DARTCNTL_SIZE_SHIFT 0
75
76/* DART table fields */
77#define DARTMAP_VALID 0x80000000
78#define DARTMAP_RPNMASK 0x00ffffff
79
80/* Physical base address and size of the DART table */
81unsigned long dart_tablebase; /* exported to htab_initialize */
82static unsigned long dart_tablesize;
83
84/* Virtual base address of the DART table */
85static u32 *dart_vbase;
86
87/* Mapped base address for the dart */
88static unsigned int *dart;
89
90/* Dummy val that entries are set to when unused */
91static unsigned int dart_emptyval;
92
93static struct iommu_table iommu_table_u3;
94static int iommu_table_u3_inited;
95static int dart_dirty;
96
97#define DBG(...)
98
99static inline void dart_tlb_invalidate_all(void)
100{
101 unsigned long l = 0;
102 unsigned int reg;
103 unsigned long limit;
104
105 DBG("dart: flush\n");
106
107 /* To invalidate the DART, set the DARTCNTL_FLUSHTLB bit in the
108 * control register and wait for it to clear.
109 *
110 * Gotcha: Sometimes, the DART won't detect that the bit gets
111 * set. If so, clear it and set it again.
112 */
113
114 limit = 0;
115
116retry:
117 reg = in_be32((unsigned int *)dart+DARTCNTL);
118 reg |= DARTCNTL_FLUSHTLB;
119 out_be32((unsigned int *)dart+DARTCNTL, reg);
120
121 l = 0;
122 while ((in_be32((unsigned int *)dart+DARTCNTL) & DARTCNTL_FLUSHTLB) &&
123 l < (1L<<limit)) {
124 l++;
125 }
126 if (l == (1L<<limit)) {
127 if (limit < 4) {
128 limit++;
129 reg = in_be32((unsigned int *)dart+DARTCNTL);
130 reg &= ~DARTCNTL_FLUSHTLB;
131 out_be32((unsigned int *)dart+DARTCNTL, reg);
132 goto retry;
133 } else
134 panic("U3-DART: TLB did not flush after waiting a long "
135 "time. Buggy U3 ?");
136 }
137}
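/*
 * Note (not in the original source): the retry loop above starts with
 * a budget of a single poll (1 << 0) and doubles it on each retry, up
 * to 1 << 4 polls; only when the flush bit still hasn't cleared after
 * that final round does the code give up and panic.
 */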
138
139static void dart_flush(struct iommu_table *tbl)
140{
141 if (dart_dirty)
142 dart_tlb_invalidate_all();
143 dart_dirty = 0;
144}
145
146static void dart_build(struct iommu_table *tbl, long index,
147 long npages, unsigned long uaddr,
148 enum dma_data_direction direction)
149{
150 unsigned int *dp;
151 unsigned int rpn;
152
153 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
154
155 dp = ((unsigned int*)tbl->it_base) + index;
156
157	/* On U3, all memory is contiguous, so we can move this
158 * out of the loop.
159 */
160 while (npages--) {
161 rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
162
163 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
164
165 rpn++;
166 uaddr += PAGE_SIZE;
167 }
168
169 dart_dirty = 1;
170}
171
172
173static void dart_free(struct iommu_table *tbl, long index, long npages)
174{
175 unsigned int *dp;
176
177 /* We don't worry about flushing the TLB cache. The only drawback of
178 * not doing it is that we won't catch buggy device drivers doing
179 * bad DMAs, but then no 32-bit architecture ever does either.
180 */
181
182 DBG("dart: free at: %lx, %lx\n", index, npages);
183
184 dp = ((unsigned int *)tbl->it_base) + index;
185
186 while (npages--)
187 *(dp++) = dart_emptyval;
188}
189
190
191static int dart_init(struct device_node *dart_node)
192{
193 unsigned int regword;
194 unsigned int i;
195 unsigned long tmp;
196
197 if (dart_tablebase == 0 || dart_tablesize == 0) {
198 printk(KERN_INFO "U3-DART: table not allocated, using direct DMA\n");
199 return -ENODEV;
200 }
201
202 /* Make sure nothing from the DART range remains in the CPU cache
203 * from a previous mapping that existed before the kernel took
204 * over
205 */
206 flush_dcache_phys_range(dart_tablebase, dart_tablebase + dart_tablesize);
207
208 /* Allocate a spare page to map all invalid DART pages. We need to do
209 * that to work around what looks like a problem with the HT bridge
210 * prefetching into invalid pages and corrupting data
211 */
212 tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE);
213 if (!tmp)
214 panic("U3-DART: Cannot allocate spare page!");
215 dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK);
216
217 /* Map in DART registers. FIXME: Use device node to get base address */
218 dart = ioremap(DART_BASE, 0x7000);
219 if (dart == NULL)
220 panic("U3-DART: Cannot map registers!");
221
222 /* Set initial control register contents: table base,
223 * table size and enable bit
224 */
225 regword = DARTCNTL_ENABLE |
226 ((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
227 (((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
228 << DARTCNTL_SIZE_SHIFT);
229 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
230
231 /* Fill initial table */
232 for (i = 0; i < dart_tablesize/4; i++)
233 dart_vbase[i] = dart_emptyval;
234
235 /* Initialize DART with table base and enable it. */
236 out_be32((unsigned int *)dart, regword);
237
238 /* Invalidate DART to get rid of possible stale TLBs */
239 dart_tlb_invalidate_all();
240
241 printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n");
242
243 return 0;
244}
245
246static void iommu_table_u3_setup(void)
247{
248 iommu_table_u3.it_busno = 0;
249 iommu_table_u3.it_offset = 0;
250 /* it_size is in number of entries */
251 iommu_table_u3.it_size = dart_tablesize / sizeof(u32);
252
253 /* Initialize the common IOMMU code */
254 iommu_table_u3.it_base = (unsigned long)dart_vbase;
255 iommu_table_u3.it_index = 0;
256 iommu_table_u3.it_blocksize = 1;
257 iommu_init_table(&iommu_table_u3);
258
259 /* Reserve the last page of the DART to avoid possible prefetch
260 * past the DART mapped area
261 */
262 set_bit(iommu_table_u3.it_size - 1, iommu_table_u3.it_map);
263}
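/*
 * Size check (our arithmetic, not from the original source): with the
 * maximum 2MB table and 4-byte entries, it_size is 2MB / 4 = 512K
 * entries; at one 4KB page per entry that covers 2GB of DMA space,
 * matching the "more than 2GB of RAM" condition in
 * alloc_u3_dart_table() below.
 */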
264
265static void iommu_dev_setup_u3(struct pci_dev *dev)
266{
267 struct device_node *dn;
268
269 /* We only have one iommu table on the mac for now, which makes
270 * things simple. Setup all PCI devices to point to this table
271 *
272 * We must use pci_device_to_OF_node() to make sure that
273 * we get the real "final" pointer to the device in the
274 * pci_dev sysdata and not the temporary PHB one
275 */
276 dn = pci_device_to_OF_node(dev);
277
278 if (dn)
279 PCI_DN(dn)->iommu_table = &iommu_table_u3;
280}
281
282static void iommu_bus_setup_u3(struct pci_bus *bus)
283{
284 struct device_node *dn;
285
286 if (!iommu_table_u3_inited) {
287 iommu_table_u3_inited = 1;
288 iommu_table_u3_setup();
289 }
290
291 dn = pci_bus_to_OF_node(bus);
292
293 if (dn)
294 PCI_DN(dn)->iommu_table = &iommu_table_u3;
295}
296
297static void iommu_dev_setup_null(struct pci_dev *dev) { }
298static void iommu_bus_setup_null(struct pci_bus *bus) { }
299
300void iommu_init_early_u3(void)
301{
302 struct device_node *dn;
303
304 /* Find the DART in the device-tree */
305 dn = of_find_compatible_node(NULL, "dart", "u3-dart");
306 if (dn == NULL)
307 return;
308
309 /* Setup low level TCE operations for the core IOMMU code */
310 ppc_md.tce_build = dart_build;
311 ppc_md.tce_free = dart_free;
312 ppc_md.tce_flush = dart_flush;
313
314 /* Initialize the DART HW */
315 if (dart_init(dn)) {
316 /* If init failed, use direct iommu and null setup functions */
317 ppc_md.iommu_dev_setup = iommu_dev_setup_null;
318 ppc_md.iommu_bus_setup = iommu_bus_setup_null;
319
320 /* Setup pci_dma ops */
321 pci_direct_iommu_init();
322 } else {
323 ppc_md.iommu_dev_setup = iommu_dev_setup_u3;
324 ppc_md.iommu_bus_setup = iommu_bus_setup_u3;
325
326 /* Setup pci_dma ops */
327 pci_iommu_init();
328 }
329}
330
331
332void __init alloc_u3_dart_table(void)
333{
334 /* Only reserve DART space if machine has more than 2GB of RAM
335 * or if requested with iommu=on on cmdline.
336 */
337 if (lmb_end_of_DRAM() <= 0x80000000ull && !iommu_force_on)
338 return;
339
340 /* 512 pages (2MB) is max DART tablesize. */
341 dart_tablesize = 1UL << 21;
342	/* 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
343 * will blow up an entire large page anyway in the kernel mapping
344 */
345 dart_tablebase = (unsigned long)
346 abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
347
348 printk(KERN_INFO "U3-DART allocated at: %lx\n", dart_tablebase);
349}
diff --git a/arch/ppc64/kernel/vdso64/sigtramp.S b/arch/ppc64/kernel/vdso64/sigtramp.S
index 8ae8f205e470..31b604ab56de 100644
--- a/arch/ppc64/kernel/vdso64/sigtramp.S
+++ b/arch/ppc64/kernel/vdso64/sigtramp.S
@@ -15,6 +15,7 @@
15#include <asm/ppc_asm.h>
16#include <asm/unistd.h>
17#include <asm/vdso.h>
18#include <asm/ptrace.h>	/* XXX for __SIGNAL_FRAMESIZE */
19
20	.text
21
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644
index cb207629f21f..000000000000
--- a/arch/ppc64/kernel/vecemu.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Routines to emulate some Altivec/VMX instructions, specifically
3 * those that can trap when given denormalized operands in Java mode.
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/sched.h>
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12/* Functions in vector.S */
13extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
14extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
15extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
16extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
17extern void vrefp(vector128 *dst, vector128 *src);
18extern void vrsqrtefp(vector128 *dst, vector128 *src);
19extern void vexptep(vector128 *dst, vector128 *src);
20
21static unsigned int exp2s[8] = {
22 0x800000,
23 0x8b95c2,
24 0x9837f0,
25 0xa5fed7,
26 0xb504f3,
27 0xc5672a,
28 0xd744fd,
29 0xeac0c7
30};
31
32/*
33 * Computes an estimate of 2^x. The `s' argument is the 32-bit
34 * single-precision floating-point representation of x.
35 */
36static unsigned int eexp2(unsigned int s)
37{
38 int exp, pwr;
39 unsigned int mant, frac;
40
41 /* extract exponent field from input */
42 exp = ((s >> 23) & 0xff) - 127;
43 if (exp > 7) {
44 /* check for NaN input */
45 if (exp == 128 && (s & 0x7fffff) != 0)
46 return s | 0x400000; /* return QNaN */
47 /* 2^-big = 0, 2^+big = +Inf */
48 return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
49 }
50 if (exp < -23)
51 return 0x3f800000; /* 1.0 */
52
53 /* convert to fixed point integer in 9.23 representation */
54 pwr = (s & 0x7fffff) | 0x800000;
55 if (exp > 0)
56 pwr <<= exp;
57 else
58 pwr >>= -exp;
59 if (s & 0x80000000)
60 pwr = -pwr;
61
62 /* extract integer part, which becomes exponent part of result */
63 exp = (pwr >> 23) + 126;
64 if (exp >= 254)
65 return 0x7f800000;
66 if (exp < -23)
67 return 0;
68
69 /* table lookup on top 3 bits of fraction to get mantissa */
70 mant = exp2s[(pwr >> 20) & 7];
71
72 /* linear interpolation using remaining 20 bits of fraction */
73 asm("mulhwu %0,%1,%2" : "=r" (frac)
74 : "r" (pwr << 12), "r" (0x172b83ff));
75 asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
76 mant += frac;
77
78 if (exp >= 0)
79 return mant + (exp << 23);
80
81 /* denormalized result */
82 exp = -exp;
83 mant += 1 << (exp - 1);
84 return mant >> exp;
85}
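/*
 * Worked example (illustrative): eexp2(0x3f800000), i.e. x = 1.0.
 * The biased exponent is 127, so exp = 0 and pwr = 0x800000 (1.0 in
 * 9.23 fixed point). Then exp becomes (pwr >> 23) + 126 = 127, the
 * table lookup hits exp2s[0] = 0x800000, the interpolation term is 0
 * (pwr << 12 truncates to 0 in 32 bits), and the result is
 * 0x800000 + (127 << 23) = 0x40000000 = 2.0, as expected.
 */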
86
87/*
88 * Computes an estimate of log_2(x). The `s' argument is the 32-bit
89 * single-precision floating-point representation of x.
90 */
91static unsigned int elog2(unsigned int s)
92{
93 int exp, mant, lz, frac;
94
95 exp = s & 0x7f800000;
96 mant = s & 0x7fffff;
97 if (exp == 0x7f800000) { /* Inf or NaN */
98 if (mant != 0)
99 s |= 0x400000; /* turn NaN into QNaN */
100 return s;
101 }
102 if ((exp | mant) == 0) /* +0 or -0 */
103 return 0xff800000; /* return -Inf */
104
105 if (exp == 0) {
106 /* denormalized */
107 asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
108 mant <<= lz - 8;
109 exp = (-118 - lz) << 23;
110 } else {
111 mant |= 0x800000;
112 exp -= 127 << 23;
113 }
114
115 if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
116 exp |= 0x400000; /* 0.5 * 2^23 */
117 asm("mulhwu %0,%1,%2" : "=r" (mant)
118 : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
119 }
120 if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
121 exp |= 0x200000; /* 0.25 * 2^23 */
122 asm("mulhwu %0,%1,%2" : "=r" (mant)
123 : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
124 }
125 if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
126 exp |= 0x100000; /* 0.125 * 2^23 */
127 asm("mulhwu %0,%1,%2" : "=r" (mant)
128 : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
129 }
130 if (mant > 0x800000) { /* 1.0 * 2^23 */
131 /* calculate (mant - 1) * 1.381097463 */
132 /* 1.381097463 == 0.125 / (2^0.125 - 1) */
133 asm("mulhwu %0,%1,%2" : "=r" (frac)
134 : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
135 exp += frac;
136 }
137 s = exp & 0x80000000;
138 if (exp != 0) {
139 if (s)
140 exp = -exp;
141 asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
142 lz = 8 - lz;
143 if (lz > 0)
144 exp >>= lz;
145 else if (lz < 0)
146 exp <<= -lz;
147 s += ((lz + 126) << 23) + exp;
148 }
149 return s;
150}
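/*
 * Worked example (illustrative): elog2(0x40000000), i.e. x = 2.0.
 * Setting the implicit bit gives mant = 0x800000 and removing the
 * bias leaves exp = 0x800000 (1.0 * 2^23). None of the table
 * thresholds apply, cntlzw returns 8 so lz = 8 - 8 = 0, and the
 * result is ((0 + 126) << 23) + 0x800000 = 0x3f800000 = 1.0,
 * i.e. log2(2) = 1.
 */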
151
152#define VSCR_SAT 1
153
154static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
155{
156 int exp, mant;
157
158 exp = (x >> 23) & 0xff;
159 mant = x & 0x7fffff;
160 if (exp == 255 && mant != 0)
161 return 0; /* NaN -> 0 */
162 exp = exp - 127 + scale;
163 if (exp < 0)
164 return 0; /* round towards zero */
165 if (exp >= 31) {
166 /* saturate, unless the result would be -2^31 */
167 if (x + (scale << 23) != 0xcf000000)
168 *vscrp |= VSCR_SAT;
169 return (x & 0x80000000)? 0x80000000: 0x7fffffff;
170 }
171 mant |= 0x800000;
172 mant = (mant << 7) >> (30 - exp);
173 return (x & 0x80000000)? -mant: mant;
174}
175
176static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
177{
178 int exp;
179 unsigned int mant;
180
181 exp = (x >> 23) & 0xff;
182 mant = x & 0x7fffff;
183 if (exp == 255 && mant != 0)
184 return 0; /* NaN -> 0 */
185 exp = exp - 127 + scale;
186 if (exp < 0)
187 return 0; /* round towards zero */
188 if (x & 0x80000000) {
189 /* negative => saturate to 0 */
190 *vscrp |= VSCR_SAT;
191 return 0;
192 }
193 if (exp >= 32) {
194 /* saturate */
195 *vscrp |= VSCR_SAT;
196 return 0xffffffff;
197 }
198 mant |= 0x800000;
199 mant = (mant << 8) >> (31 - exp);
200 return mant;
201}
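/*
 * Worked example (illustrative): ctuxs(0x40200000, 0, &vscr), i.e.
 * converting 2.5. exp = 128 - 127 + 0 = 1, mant = 0x200000 | 0x800000
 * = 0xa00000, and (0xa00000 << 8) >> (31 - 1) = 2: the fraction is
 * simply shifted away (round towards zero), so 2.5 becomes 2 with no
 * saturation.
 */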
202
203/* Round to floating integer, towards 0 */
204static unsigned int rfiz(unsigned int x)
205{
206 int exp;
207
208 exp = ((x >> 23) & 0xff) - 127;
209 if (exp == 128 && (x & 0x7fffff) != 0)
210 return x | 0x400000; /* NaN -> make it a QNaN */
211 if (exp >= 23)
212 return x; /* it's an integer already (or Inf) */
213 if (exp < 0)
214 return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
215 return x & ~(0x7fffff >> exp);
216}
217
218/* Round to floating integer, towards +/- Inf */
219static unsigned int rfii(unsigned int x)
220{
221 int exp, mask;
222
223 exp = ((x >> 23) & 0xff) - 127;
224 if (exp == 128 && (x & 0x7fffff) != 0)
225 return x | 0x400000; /* NaN -> make it a QNaN */
226 if (exp >= 23)
227 return x; /* it's an integer already (or Inf) */
228 if ((x & 0x7fffffff) == 0)
229 return x; /* +/-0 -> +/-0 */
230 if (exp < 0)
231 /* 0 < |x| < 1.0 rounds to +/- 1.0 */
232 return (x & 0x80000000) | 0x3f800000;
233 mask = 0x7fffff >> exp;
234 /* mantissa overflows into exponent - that's OK,
235 it can't overflow into the sign bit */
236 return (x + mask) & ~mask;
237}
238
239/* Round to floating integer, to nearest */
240static unsigned int rfin(unsigned int x)
241{
242 int exp, half;
243
244 exp = ((x >> 23) & 0xff) - 127;
245 if (exp == 128 && (x & 0x7fffff) != 0)
246 return x | 0x400000; /* NaN -> make it a QNaN */
247 if (exp >= 23)
248 return x; /* it's an integer already (or Inf) */
249 if (exp < -1)
250 return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
251 if (exp == -1)
252 /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
253 return (x & 0x80000000) | 0x3f800000;
254 half = 0x400000 >> exp;
255 /* add 0.5 to the magnitude and chop off the fraction bits */
256 return (x + half) & ~(0x7fffff >> exp);
257}
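/*
 * Worked example (illustrative): rfin(0x3fc00000), i.e. 1.5. Here
 * exp = 0, so half = 0x400000 and (0x3fc00000 + 0x400000) &
 * ~0x7fffff = 0x40000000 = 2.0. Note that "add half and chop" rounds
 * ties away from zero, not to nearest-even.
 */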
258
259int
260emulate_altivec(struct pt_regs *regs)
261{
262 unsigned int instr, i;
263 unsigned int va, vb, vc, vd;
264 vector128 *vrs;
265
266 if (get_user(instr, (unsigned int __user *) regs->nip))
267 return -EFAULT;
268 if ((instr >> 26) != 4)
269 return -EINVAL; /* not an altivec instruction */
270 vd = (instr >> 21) & 0x1f;
271 va = (instr >> 16) & 0x1f;
272 vb = (instr >> 11) & 0x1f;
273 vc = (instr >> 6) & 0x1f;
274
275 vrs = current->thread.vr;
276 switch (instr & 0x3f) {
277 case 10:
278 switch (vc) {
279 case 0: /* vaddfp */
280 vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
281 break;
282 case 1: /* vsubfp */
283 vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
284 break;
285 case 4: /* vrefp */
286 vrefp(&vrs[vd], &vrs[vb]);
287 break;
288 case 5: /* vrsqrtefp */
289 vrsqrtefp(&vrs[vd], &vrs[vb]);
290 break;
291 case 6: /* vexptefp */
292 for (i = 0; i < 4; ++i)
293 vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
294 break;
295 case 7: /* vlogefp */
296 for (i = 0; i < 4; ++i)
297 vrs[vd].u[i] = elog2(vrs[vb].u[i]);
298 break;
299 case 8: /* vrfin */
300 for (i = 0; i < 4; ++i)
301 vrs[vd].u[i] = rfin(vrs[vb].u[i]);
302 break;
303 case 9: /* vrfiz */
304 for (i = 0; i < 4; ++i)
305 vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
306 break;
307 case 10: /* vrfip */
308 for (i = 0; i < 4; ++i) {
309 u32 x = vrs[vb].u[i];
310 x = (x & 0x80000000)? rfiz(x): rfii(x);
311 vrs[vd].u[i] = x;
312 }
313 break;
314 case 11: /* vrfim */
315 for (i = 0; i < 4; ++i) {
316 u32 x = vrs[vb].u[i];
317 x = (x & 0x80000000)? rfii(x): rfiz(x);
318 vrs[vd].u[i] = x;
319 }
320 break;
321 case 14: /* vctuxs */
322 for (i = 0; i < 4; ++i)
323 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
324 &current->thread.vscr.u[3]);
325 break;
326 case 15: /* vctsxs */
327 for (i = 0; i < 4; ++i)
328 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
329 &current->thread.vscr.u[3]);
330 break;
331 default:
332 return -EINVAL;
333 }
334 break;
335 case 46: /* vmaddfp */
336 vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
337 break;
338 case 47: /* vnmsubfp */
339 vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
340 break;
341 default:
342 return -EINVAL;
343 }
344
345 return 0;
346}
diff --git a/arch/ppc64/kernel/vector.S b/arch/ppc64/kernel/vector.S
deleted file mode 100644
index b79d33e4001e..000000000000
--- a/arch/ppc64/kernel/vector.S
+++ /dev/null
@@ -1,172 +0,0 @@
1#include <asm/ppc_asm.h>
2#include <asm/processor.h>
3
4/*
5 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called
7 * with preempt disabled.
8 */
9 .section ".toc","aw"
10fpzero:
11 .tc FD_0_0[TC],0
12fpone:
13 .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */
14fphalf:
15 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
16
17 .text
18/*
19 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention.
21 */
22fpenable:
23 mfmsr r10
24 ori r11,r10,MSR_FP
25 mtmsr r11
26 isync
27 stfd fr31,-8(r1)
28 stfd fr0,-16(r1)
29 stfd fr1,-24(r1)
30 mffs fr31
31 lfd fr1,fpzero@toc(r2)
32 mtfsf 0xff,fr1
33 blr
34
35fpdisable:
36 mtlr r12
37 mtfsf 0xff,fr31
38 lfd fr1,-24(r1)
39 lfd fr0,-16(r1)
40 lfd fr31,-8(r1)
41 mtmsr r10
42 isync
43 blr
44
45/*
46 * Vector add, floating point.
47 */
48_GLOBAL(vaddfp)
49 mflr r12
50 bl fpenable
51 li r0,4
52 mtctr r0
53 li r6,0
541: lfsx fr0,r4,r6
55 lfsx fr1,r5,r6
56 fadds fr0,fr0,fr1
57 stfsx fr0,r3,r6
58 addi r6,r6,4
59 bdnz 1b
60 b fpdisable
61
62/*
63 * Vector subtract, floating point.
64 */
65_GLOBAL(vsubfp)
66 mflr r12
67 bl fpenable
68 li r0,4
69 mtctr r0
70 li r6,0
711: lfsx fr0,r4,r6
72 lfsx fr1,r5,r6
73 fsubs fr0,fr0,fr1
74 stfsx fr0,r3,r6
75 addi r6,r6,4
76 bdnz 1b
77 b fpdisable
78
79/*
80 * Vector multiply and add, floating point.
81 */
82_GLOBAL(vmaddfp)
83 mflr r12
84 bl fpenable
85 stfd fr2,-32(r1)
86 li r0,4
87 mtctr r0
88 li r7,0
891: lfsx fr0,r4,r7
90 lfsx fr1,r5,r7
91 lfsx fr2,r6,r7
92 fmadds fr0,fr0,fr2,fr1
93 stfsx fr0,r3,r7
94 addi r7,r7,4
95 bdnz 1b
96 lfd fr2,-32(r1)
97 b fpdisable
98
99/*
100 * Vector negative multiply and subtract, floating point.
101 */
102_GLOBAL(vnmsubfp)
103 mflr r12
104 bl fpenable
105 stfd fr2,-32(r1)
106 li r0,4
107 mtctr r0
108 li r7,0
1091: lfsx fr0,r4,r7
110 lfsx fr1,r5,r7
111 lfsx fr2,r6,r7
112 fnmsubs fr0,fr0,fr2,fr1
113 stfsx fr0,r3,r7
114 addi r7,r7,4
115 bdnz 1b
116 lfd fr2,-32(r1)
117 b fpdisable
118
119/*
120 * Vector reciprocal estimate. We just compute 1.0/x.
121 * r3 -> destination, r4 -> source.
122 */
123_GLOBAL(vrefp)
124 mflr r12
125 bl fpenable
126 li r0,4
127 lfd fr1,fpone@toc(r2)
128 mtctr r0
129 li r6,0
1301: lfsx fr0,r4,r6
131 fdivs fr0,fr1,fr0
132 stfsx fr0,r3,r6
133 addi r6,r6,4
134 bdnz 1b
135 b fpdisable
136
137/*
138 * Vector reciprocal square-root estimate, floating point.
139 * We use the frsqrte instruction for the initial estimate followed
140 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
141 * r3 -> destination, r4 -> source.
142 */
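/* Derivation sketch (not from the original comments): Newton-Raphson
 * on f(r) = 1/r^2 - s gives r' = r * (3 - s*r*r) / 2, i.e.
 * r' = r + 0.5*r*(1 - s*r*r), which is exactly what each
 * fmuls/fnmsubs/fmadds group below computes.
 */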
143_GLOBAL(vrsqrtefp)
144 mflr r12
145 bl fpenable
146 stfd fr2,-32(r1)
147 stfd fr3,-40(r1)
148 stfd fr4,-48(r1)
149 stfd fr5,-56(r1)
150 li r0,4
151 lfd fr4,fpone@toc(r2)
152 lfd fr5,fphalf@toc(r2)
153 mtctr r0
154 li r6,0
1551: lfsx fr0,r4,r6
156 frsqrte fr1,fr0 /* r = frsqrte(s) */
157 fmuls fr3,fr1,fr0 /* r * s */
158 fmuls fr2,fr1,fr5 /* r * 0.5 */
159 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
160 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
161 fmuls fr3,fr1,fr0 /* r * s */
162 fmuls fr2,fr1,fr5 /* r * 0.5 */
163 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
164 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
165 stfsx fr1,r3,r6
166 addi r6,r6,4
167 bdnz 1b
168 lfd fr5,-56(r1)
169 lfd fr4,-48(r1)
170 lfd fr3,-40(r1)
171 lfd fr2,-32(r1)
172 b fpdisable
diff --git a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c
deleted file mode 100644
index 0e555b7a6587..000000000000
--- a/arch/ppc64/kernel/vio.c
+++ /dev/null
@@ -1,261 +0,0 @@
1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/console.h>
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/dma-mapping.h>
21#include <asm/iommu.h>
22#include <asm/dma.h>
23#include <asm/vio.h>
24
25static const struct vio_device_id *vio_match_device(
26 const struct vio_device_id *, const struct vio_dev *);
27
28struct vio_dev vio_bus_device = { /* fake "parent" device */
29 .name = vio_bus_device.dev.bus_id,
30 .type = "",
31 .dev.bus_id = "vio",
32 .dev.bus = &vio_bus_type,
33};
34
35static struct vio_bus_ops vio_bus_ops;
36
37/*
38 * Convert from struct device to struct vio_dev and pass to driver.
39 * dev->driver has already been set by generic code because vio_bus_match
40 * succeeded.
41 */
42static int vio_bus_probe(struct device *dev)
43{
44 struct vio_dev *viodev = to_vio_dev(dev);
45 struct vio_driver *viodrv = to_vio_driver(dev->driver);
46 const struct vio_device_id *id;
47 int error = -ENODEV;
48
49 if (!viodrv->probe)
50 return error;
51
52 id = vio_match_device(viodrv->id_table, viodev);
53 if (id)
54 error = viodrv->probe(viodev, id);
55
56 return error;
57}
58
59/* convert from struct device to struct vio_dev and pass to driver. */
60static int vio_bus_remove(struct device *dev)
61{
62 struct vio_dev *viodev = to_vio_dev(dev);
63 struct vio_driver *viodrv = to_vio_driver(dev->driver);
64
65 if (viodrv->remove)
66 return viodrv->remove(viodev);
67
68 /* driver can't remove */
69 return 1;
70}
71
72/**
73 * vio_register_driver: - Register a new vio driver
74 * @drv: The vio_driver structure to be registered.
75 */
76int vio_register_driver(struct vio_driver *viodrv)
77{
78 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__,
79 viodrv->name);
80
81 /* fill in 'struct driver' fields */
82 viodrv->driver.name = viodrv->name;
83 viodrv->driver.bus = &vio_bus_type;
84 viodrv->driver.probe = vio_bus_probe;
85 viodrv->driver.remove = vio_bus_remove;
86
87 return driver_register(&viodrv->driver);
88}
89EXPORT_SYMBOL(vio_register_driver);
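/*
 * Illustrative sketch (hypothetical names, not from this file): the
 * shape of a client driver for the API above. The id_table entries
 * pair a device-tree "device_type" with a "compatible" string; the
 * empty entry terminates the table.
 *
 *	static struct vio_device_id example_ids[] = {
 *		{ "network", "IBM,l-lan" },
 *		{ "", "" },
 *	};
 *
 *	static int example_probe(struct vio_dev *dev,
 *				 const struct vio_device_id *id)
 *	{
 *		return 0;
 *	}
 *
 *	static struct vio_driver example_driver = {
 *		.name		= "example",
 *		.id_table	= example_ids,
 *		.probe		= example_probe,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return vio_register_driver(&example_driver);
 *	}
 */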
90
91/**
92 * vio_unregister_driver - Remove registration of vio driver.
93 * @driver: The vio_driver struct to be removed form registration
94 */
95void vio_unregister_driver(struct vio_driver *viodrv)
96{
97 driver_unregister(&viodrv->driver);
98}
99EXPORT_SYMBOL(vio_unregister_driver);
100
101/**
102 * vio_match_device: - Tell if a VIO device has a matching
103 * VIO device id structure.
104 * @ids: array of VIO device id structures to search in
105 * @dev: the VIO device structure to match against
106 *
107 * Used by a driver to check whether a VIO device present in the
108 * system is in its list of supported devices. Returns the matching
109 * vio_device_id structure or NULL if there is no match.
110 */
111static const struct vio_device_id *vio_match_device(
112 const struct vio_device_id *ids, const struct vio_dev *dev)
113{
114 while (ids->type[0] != '\0') {
115 if (vio_bus_ops.match(ids, dev))
116 return ids;
117 ids++;
118 }
119 return NULL;
120}
121
122/**
123 * vio_bus_init: - Initialize the virtual IO bus
124 */
125int __init vio_bus_init(struct vio_bus_ops *ops)
126{
127 int err;
128
129 vio_bus_ops = *ops;
130
131 err = bus_register(&vio_bus_type);
132 if (err) {
133 printk(KERN_ERR "failed to register VIO bus\n");
134 return err;
135 }
136
137 /*
138 * The fake parent of all vio devices, just to give us
139 * a nice directory
140 */
141 err = device_register(&vio_bus_device.dev);
142 if (err) {
143 printk(KERN_WARNING "%s: device_register returned %i\n",
144 __FUNCTION__, err);
145 return err;
146 }
147
148 return 0;
149}
150
151/* vio_dev refcount hit 0 */
152static void __devinit vio_dev_release(struct device *dev)
153{
154 if (vio_bus_ops.release_device)
155 vio_bus_ops.release_device(dev);
156 kfree(to_vio_dev(dev));
157}
158
159static ssize_t viodev_show_name(struct device *dev,
160 struct device_attribute *attr, char *buf)
161{
162 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
163}
164DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
165
166struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
167{
168 /* init generic 'struct device' fields: */
169 viodev->dev.parent = &vio_bus_device.dev;
170 viodev->dev.bus = &vio_bus_type;
171 viodev->dev.release = vio_dev_release;
172
173 /* register with generic device framework */
174 if (device_register(&viodev->dev)) {
175 printk(KERN_ERR "%s: failed to register device %s\n",
176 __FUNCTION__, viodev->dev.bus_id);
177 return NULL;
178 }
179 device_create_file(&viodev->dev, &dev_attr_name);
180
181 return viodev;
182}
183
184void __devinit vio_unregister_device(struct vio_dev *viodev)
185{
186 if (vio_bus_ops.unregister_device)
187 vio_bus_ops.unregister_device(viodev);
188 device_remove_file(&viodev->dev, &dev_attr_name);
189 device_unregister(&viodev->dev);
190}
191EXPORT_SYMBOL(vio_unregister_device);
192
193static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
194 size_t size, enum dma_data_direction direction)
195{
196 return iommu_map_single(to_vio_dev(dev)->iommu_table, vaddr, size,
197 direction);
198}
199
200static void vio_unmap_single(struct device *dev, dma_addr_t dma_handle,
201 size_t size, enum dma_data_direction direction)
202{
203 iommu_unmap_single(to_vio_dev(dev)->iommu_table, dma_handle, size,
204 direction);
205}
206
207static int vio_map_sg(struct device *dev, struct scatterlist *sglist,
208 int nelems, enum dma_data_direction direction)
209{
210 return iommu_map_sg(dev, to_vio_dev(dev)->iommu_table, sglist,
211 nelems, direction);
212}
213
214static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
215 int nelems, enum dma_data_direction direction)
216{
217 iommu_unmap_sg(to_vio_dev(dev)->iommu_table, sglist, nelems, direction);
218}
219
220static void *vio_alloc_coherent(struct device *dev, size_t size,
221 dma_addr_t *dma_handle, gfp_t flag)
222{
223 return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
224 dma_handle, flag);
225}
226
227static void vio_free_coherent(struct device *dev, size_t size,
228 void *vaddr, dma_addr_t dma_handle)
229{
230 iommu_free_coherent(to_vio_dev(dev)->iommu_table, size, vaddr,
231 dma_handle);
232}
233
234static int vio_dma_supported(struct device *dev, u64 mask)
235{
236 return 1;
237}
238
239struct dma_mapping_ops vio_dma_ops = {
240 .alloc_coherent = vio_alloc_coherent,
241 .free_coherent = vio_free_coherent,
242 .map_single = vio_map_single,
243 .unmap_single = vio_unmap_single,
244 .map_sg = vio_map_sg,
245 .unmap_sg = vio_unmap_sg,
246 .dma_supported = vio_dma_supported,
247};
248
249static int vio_bus_match(struct device *dev, struct device_driver *drv)
250{
251 const struct vio_dev *vio_dev = to_vio_dev(dev);
252 struct vio_driver *vio_drv = to_vio_driver(drv);
253 const struct vio_device_id *ids = vio_drv->id_table;
254
255 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
256}
257
258struct bus_type vio_bus_type = {
259 .name = "vio",
260 .match = vio_bus_match,
261};
diff --git a/arch/ppc64/kernel/viopath.c b/arch/ppc64/kernel/viopath.c
deleted file mode 100644
index 2a6c4f01c45e..000000000000
--- a/arch/ppc64/kernel/viopath.c
+++ /dev/null
@@ -1,673 +0,0 @@
1/* -*- linux-c -*-
2 * arch/ppc64/kernel/viopath.c
3 *
4 * iSeries Virtual I/O Message Path code
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 *
10 * (C) Copyright 2000-2003 IBM Corporation
11 *
12 * This code is used by the iSeries virtual disk, cd,
13 * tape, and console to communicate with OS/400 in another
14 * partition.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of the
19 * License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software Foundation,
28 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 */
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/vmalloc.h>
35#include <linux/string.h>
36#include <linux/proc_fs.h>
37#include <linux/dma-mapping.h>
38#include <linux/wait.h>
39#include <linux/seq_file.h>
40#include <linux/smp_lock.h>
41#include <linux/interrupt.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/iSeries/HvTypes.h>
46#include <asm/iSeries/ItExtVpdPanel.h>
47#include <asm/iSeries/HvLpEvent.h>
48#include <asm/iSeries/HvLpConfig.h>
49#include <asm/iSeries/mf.h>
50#include <asm/iSeries/vio.h>
51
52/* Status of the path to each other partition in the system.
53 * This is overkill, since we will only ever establish connections
54 * to our hosting partition and the primary partition on the system.
55 * But this allows for other support in the future.
56 */
57static struct viopathStatus {
58 int isOpen; /* Did we open the path? */
59 int isActive; /* Do we have a mon msg outstanding */
60 int users[VIO_MAX_SUBTYPES];
61 HvLpInstanceId mSourceInst;
62 HvLpInstanceId mTargetInst;
63 int numberAllocated;
64} viopathStatus[HVMAXARCHITECTEDLPS];
65
66static DEFINE_SPINLOCK(statuslock);
67
68/*
69 * For each kind of event we allocate a buffer that is
70 * guaranteed not to cross a page boundary
71 */
72static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
73static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
74static int event_buffer_initialised;
75
76static void handleMonitorEvent(struct HvLpEvent *event);
77
78/*
79 * We use this structure to handle asynchronous responses. The caller
80 * blocks on the semaphore and the handler posts the semaphore. However,
81 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
82 */
83struct alloc_parms {
84 struct semaphore sem;
85 int number;
86 atomic_t wait_atomic;
87 int used_wait_atomic;
88};
89
90/* Put a sequence number in each mon msg. The value is not
91 * important. Start at something other than 0 just for
92 * readability. Wrapping is OK.
93 */
94static u8 viomonseq = 22;
95
96/* Our hosting logical partition. We get this at startup
97 * time, and different modules access this variable directly.
98 */
99HvLpIndex viopath_hostLp = HvLpIndexInvalid;
100EXPORT_SYMBOL(viopath_hostLp);
101HvLpIndex viopath_ourLp = HvLpIndexInvalid;
102EXPORT_SYMBOL(viopath_ourLp);
103
104/* For each kind of incoming event we set a pointer to a
105 * routine to call.
106 */
107static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
108
109#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
110#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
111
112static int proc_viopath_show(struct seq_file *m, void *v)
113{
114 char *buf;
115 u16 vlanMap;
116 dma_addr_t handle;
117 HvLpEvent_Rc hvrc;
118 DECLARE_MUTEX_LOCKED(Semaphore);
119
120 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
121 if (!buf)
122 return 0;
123 memset(buf, 0, PAGE_SIZE);
124
125 handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
126 DMA_FROM_DEVICE);
127
128 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
129 HvLpEvent_Type_VirtualIo,
130 viomajorsubtype_config | vioconfigget,
131 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
132 viopath_sourceinst(viopath_hostLp),
133 viopath_targetinst(viopath_hostLp),
134 (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
135 ((u64)handle) << 32, PAGE_SIZE, 0, 0);
136
137 if (hvrc != HvLpEvent_Rc_Good)
138 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
139
140 down(&Semaphore);
141
142 vlanMap = HvLpConfig_getVirtualLanIndexMap();
143
144 buf[PAGE_SIZE-1] = '\0';
145 seq_printf(m, "%s", buf);
146 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
147 seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
148 e2a(xItExtVpdPanel.mfgID[2]),
149 e2a(xItExtVpdPanel.mfgID[3]),
150 e2a(xItExtVpdPanel.systemSerial[1]),
151 e2a(xItExtVpdPanel.systemSerial[2]),
152 e2a(xItExtVpdPanel.systemSerial[3]),
153 e2a(xItExtVpdPanel.systemSerial[4]),
154 e2a(xItExtVpdPanel.systemSerial[5]));
155
156 dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
157 kfree(buf);
158
159 return 0;
160}
161
162static int proc_viopath_open(struct inode *inode, struct file *file)
163{
164 return single_open(file, proc_viopath_show, NULL);
165}
166
167static struct file_operations proc_viopath_operations = {
168 .open = proc_viopath_open,
169 .read = seq_read,
170 .llseek = seq_lseek,
171 .release = single_release,
172};
173
174static int __init vio_proc_init(void)
175{
176 struct proc_dir_entry *e;
177
178 e = create_proc_entry("iSeries/config", 0, NULL);
179 if (e)
180 e->proc_fops = &proc_viopath_operations;
181
182 return 0;
183}
184__initcall(vio_proc_init);
185
186/* See if a given LP is active. Invalid LP indices may be passed in
187 * and are simply reported as inactive.
188 */
189int viopath_isactive(HvLpIndex lp)
190{
191 if (lp == HvLpIndexInvalid)
192 return 0;
193 if (lp < HVMAXARCHITECTEDLPS)
194 return viopathStatus[lp].isActive;
195 else
196 return 0;
197}
198EXPORT_SYMBOL(viopath_isactive);
199
200/*
201 * We cache the source and target instance ids for each
202 * partition.
203 */
204HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
205{
206 return viopathStatus[lp].mSourceInst;
207}
208EXPORT_SYMBOL(viopath_sourceinst);
209
210HvLpInstanceId viopath_targetinst(HvLpIndex lp)
211{
212 return viopathStatus[lp].mTargetInst;
213}
214EXPORT_SYMBOL(viopath_targetinst);
215
216/*
217 * Send a monitor message. This is a message with the acknowledge
218 * bit on that the other side will NOT explicitly acknowledge. When
219 * the other side goes down, the hypervisor will acknowledge any
220 * outstanding messages....so we will know when the other side dies.
221 */
222static void sendMonMsg(HvLpIndex remoteLp)
223{
224 HvLpEvent_Rc hvrc;
225
226 viopathStatus[remoteLp].mSourceInst =
227 HvCallEvent_getSourceLpInstanceId(remoteLp,
228 HvLpEvent_Type_VirtualIo);
229 viopathStatus[remoteLp].mTargetInst =
230 HvCallEvent_getTargetLpInstanceId(remoteLp,
231 HvLpEvent_Type_VirtualIo);
232
233 /*
234 * Deliberately ignore the return code here. if we call this
235 * more than once, we don't care.
236 */
237 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
238
239 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
240 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
241 HvLpEvent_AckType_DeferredAck,
242 viopathStatus[remoteLp].mSourceInst,
243 viopathStatus[remoteLp].mTargetInst,
244 viomonseq++, 0, 0, 0, 0, 0);
245
246 if (hvrc == HvLpEvent_Rc_Good)
247 viopathStatus[remoteLp].isActive = 1;
248 else {
249 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
250 remoteLp);
251 viopathStatus[remoteLp].isActive = 0;
252 }
253}
254
255static void handleMonitorEvent(struct HvLpEvent *event)
256{
257 HvLpIndex remoteLp;
258 int i;
259
260 /*
261 * This handler is _also_ called as part of the loop
262 * at the end of this routine, so it must be able to
263 * ignore NULL events...
264 */
265 if (!event)
266 return;
267
268 /*
269 * First see if this is just a normal monitor message from the
270 * other partition
271 */
272 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
273 remoteLp = event->xSourceLp;
274 if (!viopathStatus[remoteLp].isActive)
275 sendMonMsg(remoteLp);
276 return;
277 }
278
279 /*
280 * This path is for an acknowledgement; the other partition
281 * died
282 */
283 remoteLp = event->xTargetLp;
284 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
285 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
286 printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
287 return;
288 }
289
290 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
291
292 viopathStatus[remoteLp].isActive = 0;
293
294 /*
295 * For each active handler, pass them a NULL
296 * message to indicate that the other partition
297 * died
298 */
299 for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
300 if (vio_handler[i] != NULL)
301 (*vio_handler[i])(NULL);
302 }
303}
304
305int vio_setHandler(int subtype, vio_event_handler_t *beh)
306{
307 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
308 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
309 return -EINVAL;
310 if (vio_handler[subtype] != NULL)
311 return -EBUSY;
312 vio_handler[subtype] = beh;
313 return 0;
314}
315EXPORT_SYMBOL(vio_setHandler);
316
317int vio_clearHandler(int subtype)
318{
319 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
320 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
321 return -EINVAL;
322 if (vio_handler[subtype] == NULL)
323 return -EAGAIN;
324 vio_handler[subtype] = NULL;
325 return 0;
326}
327EXPORT_SYMBOL(vio_clearHandler);
328
329static void handleConfig(struct HvLpEvent *event)
330{
331 if (!event)
332 return;
333 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
334 printk(VIOPATH_KERN_WARN
335 "unexpected config request from partition %d",
336 event->xSourceLp);
337
338 if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
339 (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
340 event->xRc = HvLpEvent_Rc_InvalidSubtype;
341 HvCallEvent_ackLpEvent(event);
342 }
343 return;
344 }
345
346 up((struct semaphore *)event->xCorrelationToken);
347}
348
349/*
350 * Initialization of the hosting partition
351 */
352void vio_set_hostlp(void)
353{
354 /*
355 * If this has already been set then we DON'T want to either change
356 * it or re-register the proc file system
357 */
358 if (viopath_hostLp != HvLpIndexInvalid)
359 return;
360
361 /*
362 * Figure out our hosting partition. This isn't allowed to change
363 * while we're active
364 */
365 viopath_ourLp = HvLpConfig_getLpIndex();
366 viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
367
368 if (viopath_hostLp != HvLpIndexInvalid)
369 vio_setHandler(viomajorsubtype_config, handleConfig);
370}
371EXPORT_SYMBOL(vio_set_hostlp);
372
373static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
374{
375 HvLpIndex remoteLp;
376 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
377 >> VIOMAJOR_SUBTYPE_SHIFT;
378
379 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
380 remoteLp = event->xSourceLp;
381 /*
382 * The isActive is checked because if the hosting partition
383 * went down and came back up it would not be active but it
384 * would have different source and target instances, in which
385 * case we'd want to reset them. This case really protects
386 * against an unauthorized active partition sending interrupts
387 * or acks to this linux partition.
388 */
389 if (viopathStatus[remoteLp].isActive
390 && (event->xSourceInstanceId !=
391 viopathStatus[remoteLp].mTargetInst)) {
392 printk(VIOPATH_KERN_WARN
393 "message from invalid partition. "
394 "int msg rcvd, source inst (%d) doesnt match (%d)\n",
395 viopathStatus[remoteLp].mTargetInst,
396 event->xSourceInstanceId);
397 return;
398 }
399
400 if (viopathStatus[remoteLp].isActive
401 && (event->xTargetInstanceId !=
402 viopathStatus[remoteLp].mSourceInst)) {
403 printk(VIOPATH_KERN_WARN
404 "message from invalid partition. "
405 "int msg rcvd, target inst (%d) doesnt match (%d)\n",
406 viopathStatus[remoteLp].mSourceInst,
407 event->xTargetInstanceId);
408 return;
409 }
410 } else {
411 remoteLp = event->xTargetLp;
412 if (event->xSourceInstanceId !=
413 viopathStatus[remoteLp].mSourceInst) {
414 printk(VIOPATH_KERN_WARN
415 "message from invalid partition. "
416 "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
417 viopathStatus[remoteLp].mSourceInst,
418 event->xSourceInstanceId);
419 return;
420 }
421
422 if (event->xTargetInstanceId !=
423 viopathStatus[remoteLp].mTargetInst) {
424 printk(VIOPATH_KERN_WARN
425 "message from invalid partition. "
426 "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
427 viopathStatus[remoteLp].mTargetInst,
428 event->xTargetInstanceId);
429 return;
430 }
431 }
432
433 if (vio_handler[subtype] == NULL) {
434 printk(VIOPATH_KERN_WARN
435 "unexpected virtual io event subtype %d from partition %d\n",
436 event->xSubtype, remoteLp);
437 /* No handler. Ack if necessary */
438 if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
439 (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
440 event->xRc = HvLpEvent_Rc_InvalidSubtype;
441 HvCallEvent_ackLpEvent(event);
442 }
443 return;
444 }
445
446 /* This innocuous little line is where all the real work happens */
447 (*vio_handler[subtype])(event);
448}
449
450static void viopath_donealloc(void *parm, int number)
451{
452 struct alloc_parms *parmsp = parm;
453
454 parmsp->number = number;
455 if (parmsp->used_wait_atomic)
456 atomic_set(&parmsp->wait_atomic, 0);
457 else
458 up(&parmsp->sem);
459}
460
461static int allocateEvents(HvLpIndex remoteLp, int numEvents)
462{
463 struct alloc_parms parms;
464
465 if (system_state != SYSTEM_RUNNING) {
466 parms.used_wait_atomic = 1;
467 atomic_set(&parms.wait_atomic, 1);
468 } else {
469 parms.used_wait_atomic = 0;
470 init_MUTEX_LOCKED(&parms.sem);
471 }
472 mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */
473 numEvents, &viopath_donealloc, &parms);
474 if (system_state != SYSTEM_RUNNING) {
475 while (atomic_read(&parms.wait_atomic))
476 mb();
477 } else
478 down(&parms.sem);
479 return parms.number;
480}
481
482int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
483{
484 int i;
485 unsigned long flags;
486 int tempNumAllocated;
487
488 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
489 return -EINVAL;
490
491 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
492 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
493 return -EINVAL;
494
495 spin_lock_irqsave(&statuslock, flags);
496
497 if (!event_buffer_initialised) {
498 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
499 atomic_set(&event_buffer_available[i], 1);
500 event_buffer_initialised = 1;
501 }
502
503 viopathStatus[remoteLp].users[subtype]++;
504
505 if (!viopathStatus[remoteLp].isOpen) {
506 viopathStatus[remoteLp].isOpen = 1;
507 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
508
509 /*
510 * Don't hold the spinlock during an operation that
511 * can sleep.
512 */
513 spin_unlock_irqrestore(&statuslock, flags);
514 tempNumAllocated = allocateEvents(remoteLp, 1);
515 spin_lock_irqsave(&statuslock, flags);
516
517 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
518
519 if (viopathStatus[remoteLp].numberAllocated == 0) {
520 HvCallEvent_closeLpEventPath(remoteLp,
521 HvLpEvent_Type_VirtualIo);
522
523 spin_unlock_irqrestore(&statuslock, flags);
524 return -ENOMEM;
525 }
526
527 viopathStatus[remoteLp].mSourceInst =
528 HvCallEvent_getSourceLpInstanceId(remoteLp,
529 HvLpEvent_Type_VirtualIo);
530 viopathStatus[remoteLp].mTargetInst =
531 HvCallEvent_getTargetLpInstanceId(remoteLp,
532 HvLpEvent_Type_VirtualIo);
533 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
534 &vio_handleEvent);
535 sendMonMsg(remoteLp);
536 printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
537 "setting sinst %d, tinst %d\n",
538 remoteLp, viopathStatus[remoteLp].mSourceInst,
539 viopathStatus[remoteLp].mTargetInst);
540 }
541
542 spin_unlock_irqrestore(&statuslock, flags);
543 tempNumAllocated = allocateEvents(remoteLp, numReq);
544 spin_lock_irqsave(&statuslock, flags);
545 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
546 spin_unlock_irqrestore(&statuslock, flags);
547
548 return 0;
549}
550EXPORT_SYMBOL(viopath_open);
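
A minimal usage sketch for the open/close pair, assuming viopath_hostLp
has already been set up by vio_set_hostlp() and that two in-flight
events are enough for the caller (both are illustrative assumptions,
not taken from this file):

	static int example_open_config_path(void)
	{
		int rc = viopath_open(viopath_hostLp, viomajorsubtype_config, 2);

		if (rc)
			return rc;	/* -EINVAL or -ENOMEM from the open path */
		/* ... exchange events with the hosting partition ... */
		return viopath_close(viopath_hostLp, viomajorsubtype_config, 2);
	}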
551
552int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
553{
554 unsigned long flags;
555 int i;
556 int numOpen;
557 struct alloc_parms parms;
558
559 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
560 return -EINVAL;
561
562 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
563 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
564 return -EINVAL;
565
566 spin_lock_irqsave(&statuslock, flags);
567 /*
568 * If viopath_close somehow gets called before a matching
569 * viopath_open, the user count could decrement to -1, which
570 * is a non-recoverable state, so prevent that from happening
571 * here.
572 */
573 if (viopathStatus[remoteLp].users[subtype] > 0)
574 viopathStatus[remoteLp].users[subtype]--;
575
576 spin_unlock_irqrestore(&statuslock, flags);
577
578 parms.used_wait_atomic = 0;
579 init_MUTEX_LOCKED(&parms.sem);
580 mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
581 numReq, &viopath_donealloc, &parms);
582 down(&parms.sem);
583
584 spin_lock_irqsave(&statuslock, flags);
585 for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
586 numOpen += viopathStatus[remoteLp].users[i];
587
588 if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
589 printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
590 remoteLp);
591
592 HvCallEvent_closeLpEventPath(remoteLp,
593 HvLpEvent_Type_VirtualIo);
594 viopathStatus[remoteLp].isOpen = 0;
595 viopathStatus[remoteLp].isActive = 0;
596
597 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
598 atomic_set(&event_buffer_available[i], 0);
599 event_buffer_initialised = 0;
600 }
601 spin_unlock_irqrestore(&statuslock, flags);
602 return 0;
603}
604EXPORT_SYMBOL(viopath_close);
605
606void *vio_get_event_buffer(int subtype)
607{
608 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
609 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
610 return NULL;
611
612 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
613 return &event_buffer[subtype * 256];
614 else
615 return NULL;
616}
617EXPORT_SYMBOL(vio_get_event_buffer);
618
619void vio_free_event_buffer(int subtype, void *buffer)
620{
621 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
622 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
623 printk(VIOPATH_KERN_WARN
624 "unexpected subtype %d freeing event buffer\n", subtype);
625 return;
626 }
627
628 if (atomic_read(&event_buffer_available[subtype]) != 0) {
629 printk(VIOPATH_KERN_WARN
630 "freeing unallocated event buffer, subtype %d\n",
631 subtype);
632 return;
633 }
634
635 if (buffer != &event_buffer[subtype * 256]) {
636 printk(VIOPATH_KERN_WARN
637 "freeing invalid event buffer, subtype %d\n", subtype);
638 }
639
640 atomic_set(&event_buffer_available[subtype], 1);
641}
642EXPORT_SYMBOL(vio_free_event_buffer);
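
The get/free pair above is a one-slot-per-subtype allocator: the atomic
counter is 1 while the 256-byte slot is free and 0 while it is taken.
A hedged sketch of the intended calling pattern (the subtype choice is
an illustrative assumption):

	static void example_use_event_buffer(void)
	{
		void *buf = vio_get_event_buffer(viomajorsubtype_config);

		if (buf == NULL)
			return;		/* slot already in use */
		/* ... build an event of at most 256 bytes in buf ... */
		vio_free_event_buffer(viomajorsubtype_config, buf);
	}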
643
644static const struct vio_error_entry vio_no_error =
645 { 0, 0, "Non-VIO Error" };
646static const struct vio_error_entry vio_unknown_error =
647 { 0, EIO, "Unknown Error" };
648
649static const struct vio_error_entry vio_default_errors[] = {
650 {0x0001, EIO, "No Connection"},
651 {0x0002, EIO, "No Receiver"},
652 {0x0003, EIO, "No Buffer Available"},
653 {0x0004, EBADRQC, "Invalid Message Type"},
654 {0x0000, 0, NULL},
655};
656
657const struct vio_error_entry *vio_lookup_rc(
658 const struct vio_error_entry *local_table, u16 rc)
659{
660 const struct vio_error_entry *cur;
661
662 if (!rc)
663 return &vio_no_error;
664 if (local_table)
665 for (cur = local_table; cur->rc; ++cur)
666 if (cur->rc == rc)
667 return cur;
668 for (cur = vio_default_errors; cur->rc; ++cur)
669 if (cur->rc == rc)
670 return cur;
671 return &vio_unknown_error;
672}
673EXPORT_SYMBOL(vio_lookup_rc);
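
A sketch of how a driver is expected to use the lookup above: pass a
driver-local table plus the return code from an event, and map the
result to an errno. The table entry is a made-up code, and the field
names beyond ->rc are inferred from the initializers above:

	static const struct vio_error_entry example_errors[] = {
		{ 0x0101, EIO, "Device Not Ready" },	/* hypothetical rc */
		{ 0x0000, 0, NULL },
	};

	static int example_map_rc(u16 rc)
	{
		const struct vio_error_entry *err =
			vio_lookup_rc(example_errors, rc);

		printk(KERN_WARNING "vio example: rc 0x%04x: %s\n",
		       rc, err->msg);
		return -err->errno;
	}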
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 0306510bc4ff..022f220e772f 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
+#include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
 
 OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@ SECTIONS
 	LOCK_TEXT
 	KPROBES_TEXT
 	*(.fixup)
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_etext = .;
 	}
 
@@ -43,7 +44,7 @@ SECTIONS
 
 
 	/* will be freed after init */
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 
 	.init.text : {
@@ -83,7 +84,7 @@ SECTIONS
 
 	SECURITY_INIT
 
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	.init.ramfs : {
 		__initramfs_start = .;
 		*(.init.ramfs)
@@ -96,18 +97,22 @@ SECTIONS
 	__per_cpu_end = .;
 	}
 
+	. = ALIGN(PAGE_SIZE);
 	. = ALIGN(16384);
 	__init_end = .;
 	/* freed after init ends here */
 
 
 	/* Read/write sections */
+	. = ALIGN(PAGE_SIZE);
 	. = ALIGN(16384);
+	_sdata = .;
 	/* The initial task and kernel stack */
 	.data.init_task : {
 	*(.data.init_task)
 	}
 
+	. = ALIGN(PAGE_SIZE);
 	.data.page_aligned : {
 	*(.data.page_aligned)
 	}
@@ -129,18 +134,18 @@ SECTIONS
 	__toc_start = .;
 	*(.got)
 	*(.toc)
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_edata = .;
 	}
 
 
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	.bss : {
 	__bss_start = .;
 	*(.bss)
 	__bss_stop = .;
 	}
 
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	_end = . ;
 }
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
deleted file mode 100644
index daf93885dcfa..000000000000
--- a/arch/ppc64/kernel/xics.c
+++ /dev/null
@@ -1,747 +0,0 @@
1/*
2 * arch/ppc64/kernel/xics.c
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <linux/types.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/irq.h>
16#include <linux/smp.h>
17#include <linux/interrupt.h>
18#include <linux/signal.h>
19#include <linux/init.h>
20#include <linux/gfp.h>
21#include <linux/radix-tree.h>
22#include <linux/cpu.h>
23#include <asm/prom.h>
24#include <asm/io.h>
25#include <asm/pgtable.h>
26#include <asm/smp.h>
27#include <asm/rtas.h>
28#include <asm/xics.h>
29#include <asm/hvcall.h>
30#include <asm/machdep.h>
31
32#include "i8259.h"
33
34static unsigned int xics_startup(unsigned int irq);
35static void xics_enable_irq(unsigned int irq);
36static void xics_disable_irq(unsigned int irq);
37static void xics_mask_and_ack_irq(unsigned int irq);
38static void xics_end_irq(unsigned int irq);
39static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
40
41static struct hw_interrupt_type xics_pic = {
42 .typename = " XICS ",
43 .startup = xics_startup,
44 .enable = xics_enable_irq,
45 .disable = xics_disable_irq,
46 .ack = xics_mask_and_ack_irq,
47 .end = xics_end_irq,
48 .set_affinity = xics_set_affinity
49};
50
51static struct hw_interrupt_type xics_8259_pic = {
52 .typename = " XICS/8259",
53 .ack = xics_mask_and_ack_irq,
54};
55
56/* This is used to map real irq numbers to virtual */
57static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
58
59#define XICS_IPI 2
60#define XICS_IRQ_SPURIOUS 0
61
62/* Want a priority other than 0. Various HW issues require this. */
63#define DEFAULT_PRIORITY 5
64
65/*
66 * Mark IPIs as higher priority so we can take them inside interrupts that
67 * aren't marked SA_INTERRUPT
68 */
69#define IPI_PRIORITY 4
70
71struct xics_ipl {
72 union {
73 u32 word;
74 u8 bytes[4];
75 } xirr_poll;
76 union {
77 u32 word;
78 u8 bytes[4];
79 } xirr;
80 u32 dummy;
81 union {
82 u32 word;
83 u8 bytes[4];
84 } qirr;
85};
86
87static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
88
89static int xics_irq_8259_cascade = 0;
90static int xics_irq_8259_cascade_real = 0;
91static unsigned int default_server = 0xFF;
92static unsigned int default_distrib_server = 0;
93static unsigned int interrupt_server_size = 8;
94
95/*
96 * XICS only has a single IPI, so encode the messages per CPU
97 */
98struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
99
100/* RTAS service tokens */
101static int ibm_get_xive;
102static int ibm_set_xive;
103static int ibm_int_on;
104static int ibm_int_off;
105
106typedef struct {
107 int (*xirr_info_get)(int cpu);
108 void (*xirr_info_set)(int cpu, int val);
109 void (*cppr_info)(int cpu, u8 val);
110 void (*qirr_info)(int cpu, u8 val);
111} xics_ops;
112
113
114/* SMP */
115
116static int pSeries_xirr_info_get(int n_cpu)
117{
118 return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
119}
120
121static void pSeries_xirr_info_set(int n_cpu, int value)
122{
123 out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
124}
125
126static void pSeries_cppr_info(int n_cpu, u8 value)
127{
128 out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
129}
130
131static void pSeries_qirr_info(int n_cpu, u8 value)
132{
133 out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
134}
135
136static xics_ops pSeries_ops = {
137 pSeries_xirr_info_get,
138 pSeries_xirr_info_set,
139 pSeries_cppr_info,
140 pSeries_qirr_info
141};
142
143static xics_ops *ops = &pSeries_ops;
144
145
146/* LPAR */
147
148static inline long plpar_eoi(unsigned long xirr)
149{
150 return plpar_hcall_norets(H_EOI, xirr);
151}
152
153static inline long plpar_cppr(unsigned long cppr)
154{
155 return plpar_hcall_norets(H_CPPR, cppr);
156}
157
158static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
159{
160 return plpar_hcall_norets(H_IPI, servernum, mfrr);
161}
162
163static inline long plpar_xirr(unsigned long *xirr_ret)
164{
165 unsigned long dummy;
166 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
167}
168
169static int pSeriesLP_xirr_info_get(int n_cpu)
170{
171 unsigned long lpar_rc;
172 unsigned long return_value;
173
174 lpar_rc = plpar_xirr(&return_value);
175 if (lpar_rc != H_Success)
176 panic("bad return code xirr - rc = %lx\n", lpar_rc);
177 return (int)return_value;
178}
179
180static void pSeriesLP_xirr_info_set(int n_cpu, int value)
181{
182 unsigned long lpar_rc;
183 unsigned long val64 = value & 0xffffffff;
184
185 lpar_rc = plpar_eoi(val64);
186 if (lpar_rc != H_Success)
187 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
188 val64);
189}
190
191void pSeriesLP_cppr_info(int n_cpu, u8 value)
192{
193 unsigned long lpar_rc;
194
195 lpar_rc = plpar_cppr(value);
196 if (lpar_rc != H_Success)
197 panic("bad return code cppr - rc = %lx\n", lpar_rc);
198}
199
200static void pSeriesLP_qirr_info(int n_cpu , u8 value)
201{
202 unsigned long lpar_rc;
203
204 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
205 if (lpar_rc != H_Success)
206 panic("bad return code qirr - rc = %lx\n", lpar_rc);
207}
208
209xics_ops pSeriesLP_ops = {
210 pSeriesLP_xirr_info_get,
211 pSeriesLP_xirr_info_set,
212 pSeriesLP_cppr_info,
213 pSeriesLP_qirr_info
214};
215
216static unsigned int xics_startup(unsigned int virq)
217{
218 unsigned int irq;
219
220 irq = irq_offset_down(virq);
221 if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
222 &virt_irq_to_real_map[irq]) == -ENOMEM)
223 printk(KERN_CRIT "Out of memory creating real -> virtual"
224 " IRQ mapping for irq %u (real 0x%x)\n",
225 virq, virt_irq_to_real(irq));
226 xics_enable_irq(virq);
227 return 0; /* return value is ignored */
228}
229
230static unsigned int real_irq_to_virt(unsigned int real_irq)
231{
232 unsigned int *ptr;
233
234 ptr = radix_tree_lookup(&irq_map, real_irq);
235 if (ptr == NULL)
236 return NO_IRQ;
237 return ptr - virt_irq_to_real_map;
238}
239
240#ifdef CONFIG_SMP
241static int get_irq_server(unsigned int irq)
242{
243 unsigned int server;
244 /* For the moment only implement delivery to all cpus or one cpu */
245 cpumask_t cpumask = irq_affinity[irq];
246 cpumask_t tmp = CPU_MASK_NONE;
247
248 if (!distribute_irqs)
249 return default_server;
250
251 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
252 server = default_distrib_server;
253 } else {
254 cpus_and(tmp, cpu_online_map, cpumask);
255
256 if (cpus_empty(tmp))
257 server = default_distrib_server;
258 else
259 server = get_hard_smp_processor_id(first_cpu(tmp));
260 }
261
262 return server;
263
264}
265#else
266static int get_irq_server(unsigned int irq)
267{
268 return default_server;
269}
270#endif
271
272static void xics_enable_irq(unsigned int virq)
273{
274 unsigned int irq;
275 int call_status;
276 unsigned int server;
277
278 irq = virt_irq_to_real(irq_offset_down(virq));
279 if (irq == XICS_IPI)
280 return;
281
282 server = get_irq_server(virq);
283 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
284 DEFAULT_PRIORITY);
285 if (call_status != 0) {
286 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
287 "returned %d\n", irq, call_status);
288 printk("set_xive %x, server %x\n", ibm_set_xive, server);
289 return;
290 }
291
292 /* Now unmask the interrupt (often a no-op) */
293 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
294 if (call_status != 0) {
295 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
296 "returned %d\n", irq, call_status);
297 return;
298 }
299}
300
301static void xics_disable_real_irq(unsigned int irq)
302{
303 int call_status;
304 unsigned int server;
305
306 if (irq == XICS_IPI)
307 return;
308
309 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
310 if (call_status != 0) {
311 printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
312 "ibm_int_off returned %d\n", irq, call_status);
313 return;
314 }
315
316 server = get_irq_server(irq);
317 /* Have to set XIVE to 0xff to be able to remove a slot */
318 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
319 if (call_status != 0) {
320 printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
321 " returned %d\n", irq, call_status);
322 return;
323 }
324}
325
326static void xics_disable_irq(unsigned int virq)
327{
328 unsigned int irq;
329
330 irq = virt_irq_to_real(irq_offset_down(virq));
331 xics_disable_real_irq(irq);
332}
333
334static void xics_end_irq(unsigned int irq)
335{
336 int cpu = smp_processor_id();
337
338 iosync();
339 ops->xirr_info_set(cpu, ((0xff << 24) |
340 (virt_irq_to_real(irq_offset_down(irq)))));
341
342}
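
The word written back packs the new CPPR (0xff, least favored) into the
top byte and the interrupt source into the low 24 bits; storing it is
what performs the EOI. An illustrative helper, not part of this file:

	/* Build the 32-bit XIRR word written on EOI. */
	static inline unsigned int xirr_eoi_word(unsigned int real_irq)
	{
		return (0xffU << 24) | (real_irq & 0x00ffffffU);
	}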
343
344static void xics_mask_and_ack_irq(unsigned int irq)
345{
346 int cpu = smp_processor_id();
347
348 if (irq < irq_offset_value()) {
349 i8259_pic.ack(irq);
350 iosync();
351 ops->xirr_info_set(cpu, ((0xff<<24) |
352 xics_irq_8259_cascade_real));
353 iosync();
354 }
355}
356
357int xics_get_irq(struct pt_regs *regs)
358{
359 unsigned int cpu = smp_processor_id();
360 unsigned int vec;
361 int irq;
362
363 vec = ops->xirr_info_get(cpu);
364 /* (vec >> 24) == old priority */
365 vec &= 0x00ffffff;
366
367 /* for sanity, this had better be < NR_IRQS - 16 */
368 if (vec == xics_irq_8259_cascade_real) {
369 irq = i8259_irq(cpu);
370 if (irq == -1) {
371 /* Spurious cascaded interrupt. Still must ack xics */
372 xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
373
374 irq = -1;
375 }
376 } else if (vec == XICS_IRQ_SPURIOUS) {
377 irq = -1;
378 } else {
379 irq = real_irq_to_virt(vec);
380 if (irq == NO_IRQ)
381 irq = real_irq_to_virt_slowpath(vec);
382 if (irq == NO_IRQ) {
383 printk(KERN_ERR "Interrupt %u (real) is invalid,"
384 " disabling it.\n", vec);
385 xics_disable_real_irq(vec);
386 } else
387 irq = irq_offset_up(irq);
388 }
389 return irq;
390}
391
392#ifdef CONFIG_SMP
393
394irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
395{
396 int cpu = smp_processor_id();
397
398 ops->qirr_info(cpu, 0xff);
399
400 WARN_ON(cpu_is_offline(cpu));
401
402 while (xics_ipi_message[cpu].value) {
403 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
404 &xics_ipi_message[cpu].value)) {
405 mb();
406 smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
407 }
408 if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
409 &xics_ipi_message[cpu].value)) {
410 mb();
411 smp_message_recv(PPC_MSG_RESCHEDULE, regs);
412 }
413#if 0
414 if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
415 &xics_ipi_message[cpu].value)) {
416 mb();
417 smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
418 }
419#endif
420#ifdef CONFIG_DEBUGGER
421 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
422 &xics_ipi_message[cpu].value)) {
423 mb();
424 smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
425 }
426#endif
427 }
428 return IRQ_HANDLED;
429}
430
431void xics_cause_IPI(int cpu)
432{
433 ops->qirr_info(cpu, IPI_PRIORITY);
434}
435#endif /* CONFIG_SMP */
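
Because the hardware provides only the single IPI, a sender first sets
a message bit in xics_ipi_message[cpu] and then pokes the IPI;
xics_ipi_action() above drains every pending bit. A sketch of the
sending side (the helper name is hypothetical):

	static void example_send_ipi(int cpu, int msg)
	{
		set_bit(msg, &xics_ipi_message[cpu].value);
		mb();			/* publish the message before the IPI */
		xics_cause_IPI(cpu);
	}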
436
437void xics_setup_cpu(void)
438{
439 int cpu = smp_processor_id();
440
441 ops->cppr_info(cpu, 0xff);
442 iosync();
443
444 /*
445 * Put the calling processor into the GIQ. This is really only
446 * necessary from a secondary thread as the OF start-cpu interface
447 * performs this function for us on primary threads.
448 *
449 * XXX: undo of teardown on kexec needs this too, as may hotplug
450 */
451 rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
452 (1UL << interrupt_server_size) - 1 - default_distrib_server, 1);
453}
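
The indicator argument is easy to misread: it is the all-ones mask for
an interrupt_server_size-bit server field minus the default distributed
server. A worked sketch under assumed values:

	/* Illustrative: with interrupt_server_size == 8 and
	 * default_distrib_server == 1, this yields 0xff - 1 == 0xfe. */
	static unsigned long example_giq_indicator(void)
	{
		return (1UL << interrupt_server_size) - 1
			- default_distrib_server;
	}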
454
455void xics_init_IRQ(void)
456{
457 int i;
458 unsigned long intr_size = 0;
459 struct device_node *np;
460 uint *ireg, ilen, indx = 0;
461 unsigned long intr_base = 0;
462 struct xics_interrupt_node {
463 unsigned long addr;
464 unsigned long size;
465 } intnodes[NR_CPUS];
466
467 ppc64_boot_msg(0x20, "XICS Init");
468
469 ibm_get_xive = rtas_token("ibm,get-xive");
470 ibm_set_xive = rtas_token("ibm,set-xive");
471 ibm_int_on = rtas_token("ibm,int-on");
472 ibm_int_off = rtas_token("ibm,int-off");
473
474 np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
475 if (!np)
476 panic("xics_init_IRQ: can't find interrupt presentation");
477
478nextnode:
479 ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
480 if (ireg) {
481 /*
482 * set node starting index for this node
483 */
484 indx = *ireg;
485 }
486
487 ireg = (uint *)get_property(np, "reg", &ilen);
488 if (!ireg)
489 panic("xics_init_IRQ: can't find interrupt reg property");
490
491 while (ilen) {
492 intnodes[indx].addr = (unsigned long)*ireg++ << 32;
493 ilen -= sizeof(uint);
494 intnodes[indx].addr |= *ireg++;
495 ilen -= sizeof(uint);
496 intnodes[indx].size = (unsigned long)*ireg++ << 32;
497 ilen -= sizeof(uint);
498 intnodes[indx].size |= *ireg++;
499 ilen -= sizeof(uint);
500 indx++;
501 if (indx >= NR_CPUS) break;
502 }
503
504 np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
505 if ((indx < NR_CPUS) && np) goto nextnode;
506
507 /* Find the server numbers for the boot cpu. */
508 for (np = of_find_node_by_type(NULL, "cpu");
509 np;
510 np = of_find_node_by_type(np, "cpu")) {
511 ireg = (uint *)get_property(np, "reg", &ilen);
512 if (ireg && ireg[0] == boot_cpuid_phys) {
513 ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
514 &ilen);
515 i = ilen / sizeof(int);
516 if (ireg && i > 0) {
517 default_server = ireg[0];
518 default_distrib_server = ireg[i-1]; /* take last element */
519 }
520 ireg = (uint *)get_property(np,
521 "ibm,interrupt-server#-size", NULL);
522 if (ireg)
523 interrupt_server_size = *ireg;
524 break;
525 }
526 }
527 of_node_put(np);
528
529 intr_base = intnodes[0].addr;
530 intr_size = intnodes[0].size;
531
532 np = of_find_node_by_type(NULL, "interrupt-controller");
533 if (!np) {
534 printk(KERN_WARNING "xics: no ISA interrupt controller\n");
535 xics_irq_8259_cascade_real = -1;
536 xics_irq_8259_cascade = -1;
537 } else {
538 ireg = (uint *) get_property(np, "interrupts", NULL);
539 if (!ireg)
540 panic("xics_init_IRQ: can't find ISA interrupts property");
541
542 xics_irq_8259_cascade_real = *ireg;
543 xics_irq_8259_cascade
544 = virt_irq_create_mapping(xics_irq_8259_cascade_real);
545 of_node_put(np);
546 }
547
548 if (systemcfg->platform == PLATFORM_PSERIES) {
549#ifdef CONFIG_SMP
550 for_each_cpu(i) {
551 int hard_id;
552
553 /* FIXME: Do this dynamically! --RR */
554 if (!cpu_present(i))
555 continue;
556
557 hard_id = get_hard_smp_processor_id(i);
558 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
559 intnodes[hard_id].size);
560 }
561#else
562 xics_per_cpu[0] = ioremap(intr_base, intr_size);
563#endif /* CONFIG_SMP */
564 } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
565 ops = &pSeriesLP_ops;
566 }
567
568 xics_8259_pic.enable = i8259_pic.enable;
569 xics_8259_pic.disable = i8259_pic.disable;
570 for (i = 0; i < 16; ++i)
571 get_irq_desc(i)->handler = &xics_8259_pic;
572 for (; i < NR_IRQS; ++i)
573 get_irq_desc(i)->handler = &xics_pic;
574
575 xics_setup_cpu();
576
577 ppc64_boot_msg(0x21, "XICS Done");
578}
579
580/*
581 * We can't do this in init_IRQ because we need the memory subsystem up for
582 * request_irq()
583 */
584static int __init xics_setup_i8259(void)
585{
586 if (ppc64_interrupt_controller == IC_PPC_XIC &&
587 xics_irq_8259_cascade != -1) {
588 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
589 no_action, 0, "8259 cascade", NULL))
590 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
591 "cascade\n");
592 i8259_init(0);
593 }
594 return 0;
595}
596arch_initcall(xics_setup_i8259);
597
598#ifdef CONFIG_SMP
599void xics_request_IPIs(void)
600{
601 virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
602
603 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
604 request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
605 "IPI", NULL);
606 get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
607}
608#endif
609
610static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
611{
612 unsigned int irq;
613 int status;
614 int xics_status[2];
615 unsigned long newmask;
616 cpumask_t tmp = CPU_MASK_NONE;
617
618 irq = virt_irq_to_real(irq_offset_down(virq));
619 if (irq == XICS_IPI || irq == NO_IRQ)
620 return;
621
622 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
623
624 if (status) {
625 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
626 "returns %d\n", irq, status);
627 return;
628 }
629
630 /* For the moment only implement delivery to all cpus or one cpu */
631 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
632 newmask = default_distrib_server;
633 } else {
634 cpus_and(tmp, cpu_online_map, cpumask);
635 if (cpus_empty(tmp))
636 return;
637 newmask = get_hard_smp_processor_id(first_cpu(tmp));
638 }
639
640 status = rtas_call(ibm_set_xive, 3, 1, NULL,
641 irq, newmask, xics_status[1]);
642
643 if (status) {
644 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
645 "returns %d\n", irq, status);
646 return;
647 }
648}
649
650void xics_teardown_cpu(int secondary)
651{
652 int cpu = smp_processor_id();
653
654 ops->cppr_info(cpu, 0x00);
655 iosync();
656
657 /*
658 * Some machines need to have at least one cpu in the GIQ,
659 * so leave the master cpu in the group.
660 */
661 if (secondary) {
662 /*
663 * we need to EOI the IPI if we got here from kexec down IPI
664 *
665 * probably need to check all the other interrupts too
666 * should we be flagging idle loop instead?
667 * or creating some task to be scheduled?
668 */
669 ops->xirr_info_set(cpu, XICS_IPI);
670 rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
671 (1UL << interrupt_server_size) - 1 -
672 default_distrib_server, 0);
673 }
674}
675
676#ifdef CONFIG_HOTPLUG_CPU
677
678/* Interrupts are disabled. */
679void xics_migrate_irqs_away(void)
680{
681 int status;
682 unsigned int irq, virq, cpu = smp_processor_id();
683
684 /* Reject any interrupt that was queued to us... */
685 ops->cppr_info(cpu, 0);
686 iosync();
687
688 /* remove ourselves from the global interrupt queue */
689 status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
690 (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
691 WARN_ON(status < 0);
692
693 /* Allow IPIs again... */
694 ops->cppr_info(cpu, DEFAULT_PRIORITY);
695 iosync();
696
697 for_each_irq(virq) {
698 irq_desc_t *desc;
699 int xics_status[2];
700 unsigned long flags;
701
702 /* We can't set affinity on ISA interrupts */
703 if (virq < irq_offset_value())
704 continue;
705
706 desc = get_irq_desc(virq);
707 irq = virt_irq_to_real(irq_offset_down(virq));
708
709 /* We need to get IPIs still. */
710 if (irq == XICS_IPI || irq == NO_IRQ)
711 continue;
712
713 /* We only need to migrate enabled IRQS */
714 if (desc == NULL || desc->handler == NULL
715 || desc->action == NULL
716 || desc->handler->set_affinity == NULL)
717 continue;
718
719 spin_lock_irqsave(&desc->lock, flags);
720
721 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
722 if (status) {
723 printk(KERN_ERR "migrate_irqs_away: irq=%u "
724 "ibm,get-xive returns %d\n",
725 virq, status);
726 goto unlock;
727 }
728
729 /*
730 * We only support delivery to all cpus or to one cpu.
731 * The irq has to be migrated only in the single cpu
732 * case.
733 */
734 if (xics_status[0] != get_hard_smp_processor_id(cpu))
735 goto unlock;
736
737 printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
738 virq, cpu);
739
740 /* Reset affinity to all cpus */
741 desc->handler->set_affinity(virq, CPU_MASK_ALL);
742 irq_affinity[virq] = CPU_MASK_ALL;
743unlock:
744 spin_unlock_irqrestore(&desc->lock, flags);
745 }
746}
747#endif
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
index 0b6e967de948..42d5295bf345 100644
--- a/arch/ppc64/lib/Makefile
+++ b/arch/ppc64/lib/Makefile
@@ -2,17 +2,4 @@
 # Makefile for ppc64-specific library files..
 #
 
-lib-y := checksum.o string.o strcase.o
-lib-y += copypage.o memcpy.o copyuser.o usercopy.o
-
-# Lock primitives are defined as no-ops in include/linux/spinlock.h
-# for non-SMP configs. Don't build the real versions.
-
-lib-$(CONFIG_SMP) += locks.o
-
-# e2a provides EBCDIC to ASCII conversions.
-ifdef CONFIG_PPC_ISERIES
-obj-y += e2a.o
-endif
-
-lib-$(CONFIG_DEBUG_KERNEL) += sstep.o
+lib-y := string.o
diff --git a/arch/ppc64/lib/checksum.S b/arch/ppc64/lib/checksum.S
deleted file mode 100644
index ef96c6c58efc..000000000000
--- a/arch/ppc64/lib/checksum.S
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * This file contains assembly-language implementations
3 * of IP-style 1's complement checksum routines.
4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
13 */
14
15#include <linux/sys.h>
16#include <asm/processor.h>
17#include <asm/errno.h>
18#include <asm/ppc_asm.h>
19
20/*
21 * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
22 * len is in words and is always >= 5.
23 *
24 * In practice len == 5, but this is not guaranteed. So this code does not
25 * attempt to use doubleword instructions.
26 */
27_GLOBAL(ip_fast_csum)
28 lwz r0,0(r3)
29 lwzu r5,4(r3)
30 addic. r4,r4,-2
31 addc r0,r0,r5
32 mtctr r4
33 blelr-
341: lwzu r4,4(r3)
35 adde r0,r0,r4
36 bdnz 1b
37 addze r0,r0 /* add in final carry */
38 rldicl r4,r0,32,0 /* fold two 32-bit halves together */
39 add r0,r0,r4
40 srdi r0,r0,32
41 rlwinm r3,r0,16,0,31 /* fold two halves together */
42 add r3,r0,r3
43 not r3,r3
44 srwi r3,r3,16
45 blr
46
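
The tail of the routine above folds the 64-bit accumulator down to a
16-bit one's-complement result (64 -> 32 -> 16 bits, then complement).
A roughly equivalent C sketch, not a transliteration of the asm:

	/* End-around-carry fold of a 64-bit partial checksum. */
	static inline unsigned short csum_fold64(unsigned long long sum)
	{
		sum = (sum & 0xffffffffULL) + (sum >> 32);	/* 64 -> 33 bits */
		sum = (sum & 0xffffffffULL) + (sum >> 32);	/* fold carry back */
		sum = (sum & 0xffffULL) + (sum >> 16);		/* 32 -> 17 bits */
		sum = (sum & 0xffffULL) + (sum >> 16);		/* fold carry back */
		return (unsigned short)~sum;
	}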
47/*
48 * Compute checksum of TCP or UDP pseudo-header:
49 * csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
50 * No real gain trying to do this specially for 64 bit, but
51 * the 32 bit addition may spill into the upper bits of
52 * the doubleword so we still must fold it down from 64.
53 */
54_GLOBAL(csum_tcpudp_magic)
55 rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
56 addc r0,r3,r4 /* add 4 32-bit words together */
57 adde r0,r0,r5
58 adde r0,r0,r7
59 rldicl r4,r0,32,0 /* fold 64 bit value */
60 add r0,r4,r0
61 srdi r0,r0,32
62 rlwinm r3,r0,16,0,31 /* fold two halves together */
63 add r3,r0,r3
64 not r3,r3
65 srwi r3,r3,16
66 blr
67
68/*
69 * Computes the checksum of a memory block at buff, length len,
70 * and adds in "sum" (32-bit).
71 *
72 * This code assumes at least halfword alignment, though the length
73 * can be any number of bytes. The sum is accumulated in r5.
74 *
75 * csum_partial(r3=buff, r4=len, r5=sum)
76 */
77_GLOBAL(csum_partial)
78 subi r3,r3,8 /* we'll offset by 8 for the loads */
79 srdi. r6,r4,3 /* divide by 8 for doubleword count */
80 addic r5,r5,0 /* clear carry */
81 beq 3f /* if we're doing < 8 bytes */
82 andi. r0,r3,2 /* aligned on a word boundary already? */
83 beq+ 1f
84 lhz r6,8(r3) /* do 2 bytes to get aligned */
85 addi r3,r3,2
86 subi r4,r4,2
87 addc r5,r5,r6
88 srdi. r6,r4,3 /* recompute number of doublewords */
89 beq 3f /* any left? */
901: mtctr r6
912: ldu r6,8(r3) /* main sum loop */
92 adde r5,r5,r6
93 bdnz 2b
94 andi. r4,r4,7 /* compute bytes left to sum after doublewords */
953: cmpwi 0,r4,4 /* is at least a full word left? */
96 blt 4f
97 lwz r6,8(r3) /* sum this word */
98 addi r3,r3,4
99 subi r4,r4,4
100 adde r5,r5,r6
1014: cmpwi 0,r4,2 /* is at least a halfword left? */
102 blt+ 5f
103 lhz r6,8(r3) /* sum this halfword */
104 addi r3,r3,2
105 subi r4,r4,2
106 adde r5,r5,r6
1075: cmpwi 0,r4,1 /* is at least a byte left? */
108 bne+ 6f
109 lbz r6,8(r3) /* sum this byte */
110 slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */
111 adde r5,r5,r6
1126: addze r5,r5 /* add in final carry */
113 rldicl r4,r5,32,0 /* fold two 32-bit halves together */
114 add r3,r4,r5
115 srdi r3,r3,32
116 blr
117
118/*
119 * Computes the checksum of a memory block at src, length len,
120 * and adds in "sum" (32-bit), while copying the block to dst.
121 * If an access exception occurs on src or dst, it stores -EFAULT
122 * to *src_err or *dst_err respectively, and (for an error on
123 * src) zeroes the rest of dst.
124 *
125 * This code needs to be reworked to take advantage of 64 bit sum+copy.
126 * However, due to tokenring halfword alignment problems this will be very
127 * tricky. For now we'll leave it until we instrument it somehow.
128 *
129 * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
130 */
131_GLOBAL(csum_partial_copy_generic)
132 addic r0,r6,0
133 subi r3,r3,4
134 subi r4,r4,4
135 srwi. r6,r5,2
136 beq 3f /* if we're doing < 4 bytes */
137 andi. r9,r4,2 /* Align dst to longword boundary */
138 beq+ 1f
13981: lhz r6,4(r3) /* do 2 bytes to get aligned */
140 addi r3,r3,2
141 subi r5,r5,2
14291: sth r6,4(r4)
143 addi r4,r4,2
144 addc r0,r0,r6
145 srwi. r6,r5,2 /* # words to do */
146 beq 3f
1471: mtctr r6
14882: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */
14992: stwu r6,4(r4) /* be unnecessary to unroll this loop */
150 adde r0,r0,r6
151 bdnz 82b
152 andi. r5,r5,3
1533: cmpwi 0,r5,2
154 blt+ 4f
15583: lhz r6,4(r3)
156 addi r3,r3,2
157 subi r5,r5,2
15893: sth r6,4(r4)
159 addi r4,r4,2
160 adde r0,r0,r6
1614: cmpwi 0,r5,1
162 bne+ 5f
16384: lbz r6,4(r3)
16494: stb r6,4(r4)
165 slwi r6,r6,8 /* Upper byte of word */
166 adde r0,r0,r6
1675: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */
168 rldicl r4,r3,32,0 /* fold 64 bit value */
169 add r3,r4,r3
170 srdi r3,r3,32
171 blr
172
173/* These shouldn't go in the fixup section, since that would
174 cause the ex_table addresses to get out of order. */
175
176 .globl src_error_1
177src_error_1:
178 li r6,0
179 subi r5,r5,2
18095: sth r6,4(r4)
181 addi r4,r4,2
182 srwi. r6,r5,2
183 beq 3f
184 mtctr r6
185 .globl src_error_2
186src_error_2:
187 li r6,0
18896: stwu r6,4(r4)
189 bdnz 96b
1903: andi. r5,r5,3
191 beq src_error
192 .globl src_error_3
193src_error_3:
194 li r6,0
195 mtctr r5
196 addi r4,r4,3
19797: stbu r6,1(r4)
198 bdnz 97b
199 .globl src_error
200src_error:
201 cmpdi 0,r7,0
202 beq 1f
203 li r6,-EFAULT
204 stw r6,0(r7)
2051: addze r3,r0
206 blr
207
208 .globl dst_error
209dst_error:
210 cmpdi 0,r8,0
211 beq 1f
212 li r6,-EFAULT
213 stw r6,0(r8)
2141: addze r3,r0
215 blr
216
217.section __ex_table,"a"
218 .align 3
219 .llong 81b,src_error_1
220 .llong 91b,dst_error
221 .llong 82b,src_error_2
222 .llong 92b,dst_error
223 .llong 83b,src_error_3
224 .llong 93b,dst_error
225 .llong 84b,src_error_3
226 .llong 94b,dst_error
227 .llong 95b,dst_error
228 .llong 96b,dst_error
229 .llong 97b,dst_error
diff --git a/arch/ppc64/lib/copypage.S b/arch/ppc64/lib/copypage.S
deleted file mode 100644
index 733d61618bbf..000000000000
--- a/arch/ppc64/lib/copypage.S
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * arch/ppc64/lib/copypage.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14_GLOBAL(copy_page)
15 std r31,-8(1)
16 std r30,-16(1)
17 std r29,-24(1)
18 std r28,-32(1)
19 std r27,-40(1)
20 std r26,-48(1)
21 std r25,-56(1)
22 std r24,-64(1)
23 std r23,-72(1)
24 std r22,-80(1)
25 std r21,-88(1)
26 std r20,-96(1)
27 li r5,4096/32 - 1
28 addi r3,r3,-8
29 li r12,5
300: addi r5,r5,-24
31 mtctr r12
32 ld r22,640(4)
33 ld r21,512(4)
34 ld r20,384(4)
35 ld r11,256(4)
36 ld r9,128(4)
37 ld r7,0(4)
38 ld r25,648(4)
39 ld r24,520(4)
40 ld r23,392(4)
41 ld r10,264(4)
42 ld r8,136(4)
43 ldu r6,8(4)
44 cmpwi r5,24
451: std r22,648(3)
46 std r21,520(3)
47 std r20,392(3)
48 std r11,264(3)
49 std r9,136(3)
50 std r7,8(3)
51 ld r28,648(4)
52 ld r27,520(4)
53 ld r26,392(4)
54 ld r31,264(4)
55 ld r30,136(4)
56 ld r29,8(4)
57 std r25,656(3)
58 std r24,528(3)
59 std r23,400(3)
60 std r10,272(3)
61 std r8,144(3)
62 std r6,16(3)
63 ld r22,656(4)
64 ld r21,528(4)
65 ld r20,400(4)
66 ld r11,272(4)
67 ld r9,144(4)
68 ld r7,16(4)
69 std r28,664(3)
70 std r27,536(3)
71 std r26,408(3)
72 std r31,280(3)
73 std r30,152(3)
74 stdu r29,24(3)
75 ld r25,664(4)
76 ld r24,536(4)
77 ld r23,408(4)
78 ld r10,280(4)
79 ld r8,152(4)
80 ldu r6,24(4)
81 bdnz 1b
82 std r22,648(3)
83 std r21,520(3)
84 std r20,392(3)
85 std r11,264(3)
86 std r9,136(3)
87 std r7,8(3)
88 addi r4,r4,640
89 addi r3,r3,648
90 bge 0b
91 mtctr r5
92 ld r7,0(4)
93 ld r8,8(4)
94 ldu r9,16(4)
953: ld r10,8(4)
96 std r7,8(3)
97 ld r7,16(4)
98 std r8,16(3)
99 ld r8,24(4)
100 std r9,24(3)
101 ldu r9,32(4)
102 stdu r10,32(3)
103 bdnz 3b
1044: ld r10,8(4)
105 std r7,8(3)
106 std r8,16(3)
107 std r9,24(3)
108 std r10,32(3)
1099: ld r20,-96(1)
110 ld r21,-88(1)
111 ld r22,-80(1)
112 ld r23,-72(1)
113 ld r24,-64(1)
114 ld r25,-56(1)
115 ld r26,-48(1)
116 ld r27,-40(1)
117 ld r28,-32(1)
118 ld r29,-24(1)
119 ld r30,-16(1)
120 ld r31,-8(1)
121 blr
diff --git a/arch/ppc64/lib/copyuser.S b/arch/ppc64/lib/copyuser.S
deleted file mode 100644
index a0b3fbbd6fb1..000000000000
--- a/arch/ppc64/lib/copyuser.S
+++ /dev/null
@@ -1,576 +0,0 @@
1/*
2 * arch/ppc64/lib/copyuser.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14 .align 7
15_GLOBAL(__copy_tofrom_user)
16 /* first check for a whole page copy on a page boundary */
17 cmpldi cr1,r5,16
18 cmpdi cr6,r5,4096
19 or r0,r3,r4
20 neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
21 andi. r0,r0,4095
22 std r3,-24(r1)
23 crand cr0*4+2,cr0*4+2,cr6*4+2
24 std r4,-16(r1)
25 std r5,-8(r1)
26 dcbt 0,r4
27 beq .Lcopy_page
28 andi. r6,r6,7
29 mtcrf 0x01,r5
30 blt cr1,.Lshort_copy
31 bne .Ldst_unaligned
32.Ldst_aligned:
33 andi. r0,r4,7
34 addi r3,r3,-16
35 bne .Lsrc_unaligned
36 srdi r7,r5,4
3720: ld r9,0(r4)
38 addi r4,r4,-8
39 mtctr r7
40 andi. r5,r5,7
41 bf cr7*4+0,22f
42 addi r3,r3,8
43 addi r4,r4,8
44 mr r8,r9
45 blt cr1,72f
4621: ld r9,8(r4)
4770: std r8,8(r3)
4822: ldu r8,16(r4)
4971: stdu r9,16(r3)
50 bdnz 21b
5172: std r8,8(r3)
52 beq+ 3f
53 addi r3,r3,16
5423: ld r9,8(r4)
55.Ldo_tail:
56 bf cr7*4+1,1f
57 rotldi r9,r9,32
5873: stw r9,0(r3)
59 addi r3,r3,4
601: bf cr7*4+2,2f
61 rotldi r9,r9,16
6274: sth r9,0(r3)
63 addi r3,r3,2
642: bf cr7*4+3,3f
65 rotldi r9,r9,8
6675: stb r9,0(r3)
673: li r3,0
68 blr
69
70.Lsrc_unaligned:
71 srdi r6,r5,3
72 addi r5,r5,-16
73 subf r4,r0,r4
74 srdi r7,r5,4
75 sldi r10,r0,3
76 cmpldi cr6,r6,3
77 andi. r5,r5,7
78 mtctr r7
79 subfic r11,r10,64
80 add r5,r5,r0
81 bt cr7*4+0,28f
82
8324: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
8425: ld r0,8(r4)
85 sld r6,r9,r10
8626: ldu r9,16(r4)
87 srd r7,r0,r11
88 sld r8,r0,r10
89 or r7,r7,r6
90 blt cr6,79f
9127: ld r0,8(r4)
92 b 2f
93
9428: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
9529: ldu r9,8(r4)
96 sld r8,r0,r10
97 addi r3,r3,-8
98 blt cr6,5f
9930: ld r0,8(r4)
100 srd r12,r9,r11
101 sld r6,r9,r10
10231: ldu r9,16(r4)
103 or r12,r8,r12
104 srd r7,r0,r11
105 sld r8,r0,r10
106 addi r3,r3,16
107 beq cr6,78f
108
1091: or r7,r7,r6
11032: ld r0,8(r4)
11176: std r12,8(r3)
1122: srd r12,r9,r11
113 sld r6,r9,r10
11433: ldu r9,16(r4)
115 or r12,r8,r12
11677: stdu r7,16(r3)
117 srd r7,r0,r11
118 sld r8,r0,r10
119 bdnz 1b
120
12178: std r12,8(r3)
122 or r7,r7,r6
12379: std r7,16(r3)
1245: srd r12,r9,r11
125 or r12,r8,r12
12680: std r12,24(r3)
127 bne 6f
128 li r3,0
129 blr
1306: cmpwi cr1,r5,8
131 addi r3,r3,32
132 sld r9,r9,r10
133 ble cr1,.Ldo_tail
13434: ld r0,8(r4)
135 srd r7,r0,r11
136 or r9,r7,r9
137 b .Ldo_tail
138
139.Ldst_unaligned:
140 mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */
141 subf r5,r6,r5
142 li r7,0
143 cmpldi r1,r5,16
144 bf cr7*4+3,1f
14535: lbz r0,0(r4)
14681: stb r0,0(r3)
147 addi r7,r7,1
1481: bf cr7*4+2,2f
14936: lhzx r0,r7,r4
15082: sthx r0,r7,r3
151 addi r7,r7,2
1522: bf cr7*4+1,3f
15337: lwzx r0,r7,r4
15483: stwx r0,r7,r3
1553: mtcrf 0x01,r5
156 add r4,r6,r4
157 add r3,r6,r3
158 b .Ldst_aligned
159
160.Lshort_copy:
161 bf cr7*4+0,1f
16238: lwz r0,0(r4)
16339: lwz r9,4(r4)
164 addi r4,r4,8
16584: stw r0,0(r3)
16685: stw r9,4(r3)
167 addi r3,r3,8
1681: bf cr7*4+1,2f
16940: lwz r0,0(r4)
170 addi r4,r4,4
17186: stw r0,0(r3)
172 addi r3,r3,4
1732: bf cr7*4+2,3f
17441: lhz r0,0(r4)
175 addi r4,r4,2
17687: sth r0,0(r3)
177 addi r3,r3,2
1783: bf cr7*4+3,4f
17942: lbz r0,0(r4)
18088: stb r0,0(r3)
1814: li r3,0
182 blr
183
184/*
185 * exception handlers follow
186 * we have to return the number of bytes not copied
187 * for an exception on a load, we set the rest of the destination to 0
188 */
189
190136:
191137:
192 add r3,r3,r7
193 b 1f
194130:
195131:
196 addi r3,r3,8
197120:
198122:
199124:
200125:
201126:
202127:
203128:
204129:
205133:
206 addi r3,r3,8
207121:
208132:
209 addi r3,r3,8
210123:
211134:
212135:
213138:
214139:
215140:
216141:
217142:
218
219/*
220 * here we have had a fault on a load and r3 points to the first
221 * unmodified byte of the destination
222 */
2231: ld r6,-24(r1)
224 ld r4,-16(r1)
225 ld r5,-8(r1)
226 subf r6,r6,r3
227 add r4,r4,r6
228 subf r5,r6,r5 /* #bytes left to go */
229
230/*
231 * first see if we can copy any more bytes before hitting another exception
232 */
233 mtctr r5
23443: lbz r0,0(r4)
235 addi r4,r4,1
23689: stb r0,0(r3)
237 addi r3,r3,1
238 bdnz 43b
239 li r3,0 /* huh? all copied successfully this time? */
240 blr
241
242/*
243 * here we have trapped again, need to clear ctr bytes starting at r3
244 */
245143: mfctr r5
246 li r0,0
247 mr r4,r3
248 mr r3,r5 /* return the number of bytes not copied */
2491: andi. r9,r4,7
250 beq 3f
25190: stb r0,0(r4)
252 addic. r5,r5,-1
253 addi r4,r4,1
254 bne 1b
255 blr
2563: cmpldi cr1,r5,8
257 srdi r9,r5,3
258 andi. r5,r5,7
259 blt cr1,93f
260 mtctr r9
26191: std r0,0(r4)
262 addi r4,r4,8
263 bdnz 91b
26493: beqlr
265 mtctr r5
26692: stb r0,0(r4)
267 addi r4,r4,1
268 bdnz 92b
269 blr
270
271/*
272 * exception handlers for stores: we just need to work
273 * out how many bytes weren't copied
274 */
275182:
276183:
277 add r3,r3,r7
278 b 1f
279180:
280 addi r3,r3,8
281171:
282177:
283 addi r3,r3,8
284170:
285172:
286176:
287178:
288 addi r3,r3,4
289185:
290 addi r3,r3,4
291173:
292174:
293175:
294179:
295181:
296184:
297186:
298187:
299188:
300189:
3011:
302 ld r6,-24(r1)
303 ld r5,-8(r1)
304 add r6,r6,r5
305 subf r3,r3,r6 /* #bytes not copied */
306190:
307191:
308192:
309 blr /* #bytes not copied in r3 */
310
311 .section __ex_table,"a"
312 .align 3
313 .llong 20b,120b
314 .llong 21b,121b
315 .llong 70b,170b
316 .llong 22b,122b
317 .llong 71b,171b
318 .llong 72b,172b
319 .llong 23b,123b
320 .llong 73b,173b
321 .llong 74b,174b
322 .llong 75b,175b
323 .llong 24b,124b
324 .llong 25b,125b
325 .llong 26b,126b
326 .llong 27b,127b
327 .llong 28b,128b
328 .llong 29b,129b
329 .llong 30b,130b
330 .llong 31b,131b
331 .llong 32b,132b
332 .llong 76b,176b
333 .llong 33b,133b
334 .llong 77b,177b
335 .llong 78b,178b
336 .llong 79b,179b
337 .llong 80b,180b
338 .llong 34b,134b
339 .llong 35b,135b
340 .llong 81b,181b
341 .llong 36b,136b
342 .llong 82b,182b
343 .llong 37b,137b
344 .llong 83b,183b
345 .llong 38b,138b
346 .llong 39b,139b
347 .llong 84b,184b
348 .llong 85b,185b
349 .llong 40b,140b
350 .llong 86b,186b
351 .llong 41b,141b
352 .llong 87b,187b
353 .llong 42b,142b
354 .llong 88b,188b
355 .llong 43b,143b
356 .llong 89b,189b
357 .llong 90b,190b
358 .llong 91b,191b
359 .llong 92b,192b
360
361 .text
362
363/*
364 * Routine to copy a whole page of data, optimized for POWER4.
365 * On POWER4 it is more than 50% faster than the simple loop
366 * above (following the .Ldst_aligned label) but it runs slightly
367 * slower on POWER3.
368 */
369.Lcopy_page:
370 std r31,-32(1)
371 std r30,-40(1)
372 std r29,-48(1)
373 std r28,-56(1)
374 std r27,-64(1)
375 std r26,-72(1)
376 std r25,-80(1)
377 std r24,-88(1)
378 std r23,-96(1)
379 std r22,-104(1)
380 std r21,-112(1)
381 std r20,-120(1)
382 li r5,4096/32 - 1
383 addi r3,r3,-8
384 li r0,5
3850: addi r5,r5,-24
386 mtctr r0
38720: ld r22,640(4)
38821: ld r21,512(4)
38922: ld r20,384(4)
39023: ld r11,256(4)
39124: ld r9,128(4)
39225: ld r7,0(4)
39326: ld r25,648(4)
39427: ld r24,520(4)
39528: ld r23,392(4)
39629: ld r10,264(4)
39730: ld r8,136(4)
39831: ldu r6,8(4)
399 cmpwi r5,24
4001:
40132: std r22,648(3)
40233: std r21,520(3)
40334: std r20,392(3)
40435: std r11,264(3)
40536: std r9,136(3)
40637: std r7,8(3)
40738: ld r28,648(4)
40839: ld r27,520(4)
40940: ld r26,392(4)
41041: ld r31,264(4)
41142: ld r30,136(4)
41243: ld r29,8(4)
41344: std r25,656(3)
41445: std r24,528(3)
41546: std r23,400(3)
41647: std r10,272(3)
41748: std r8,144(3)
41849: std r6,16(3)
41950: ld r22,656(4)
42051: ld r21,528(4)
42152: ld r20,400(4)
42253: ld r11,272(4)
42354: ld r9,144(4)
42455: ld r7,16(4)
42556: std r28,664(3)
42657: std r27,536(3)
42758: std r26,408(3)
42859: std r31,280(3)
42960: std r30,152(3)
43061: stdu r29,24(3)
43162: ld r25,664(4)
43263: ld r24,536(4)
43364: ld r23,408(4)
43465: ld r10,280(4)
43566: ld r8,152(4)
43667: ldu r6,24(4)
437 bdnz 1b
43868: std r22,648(3)
43969: std r21,520(3)
44070: std r20,392(3)
44171: std r11,264(3)
44272: std r9,136(3)
44373: std r7,8(3)
44474: addi r4,r4,640
44575: addi r3,r3,648
446 bge 0b
447 mtctr r5
44876: ld r7,0(4)
44977: ld r8,8(4)
45078: ldu r9,16(4)
4513:
45279: ld r10,8(4)
45380: std r7,8(3)
45481: ld r7,16(4)
45582: std r8,16(3)
45683: ld r8,24(4)
45784: std r9,24(3)
45885: ldu r9,32(4)
45986: stdu r10,32(3)
460 bdnz 3b
4614:
46287: ld r10,8(4)
46388: std r7,8(3)
46489: std r8,16(3)
46590: std r9,24(3)
46691: std r10,32(3)
4679: ld r20,-120(1)
468 ld r21,-112(1)
469 ld r22,-104(1)
470 ld r23,-96(1)
471 ld r24,-88(1)
472 ld r25,-80(1)
473 ld r26,-72(1)
474 ld r27,-64(1)
475 ld r28,-56(1)
476 ld r29,-48(1)
477 ld r30,-40(1)
478 ld r31,-32(1)
479 li r3,0
480 blr
481
482/*
483 * on an exception, reset to the beginning and jump back into the
484 * standard __copy_tofrom_user
485 */
486100: ld r20,-120(1)
487 ld r21,-112(1)
488 ld r22,-104(1)
489 ld r23,-96(1)
490 ld r24,-88(1)
491 ld r25,-80(1)
492 ld r26,-72(1)
493 ld r27,-64(1)
494 ld r28,-56(1)
495 ld r29,-48(1)
496 ld r30,-40(1)
497 ld r31,-32(1)
498 ld r3,-24(r1)
499 ld r4,-16(r1)
500 li r5,4096
501 b .Ldst_aligned
502
503 .section __ex_table,"a"
504 .align 3
505 .llong 20b,100b
506 .llong 21b,100b
507 .llong 22b,100b
508 .llong 23b,100b
509 .llong 24b,100b
510 .llong 25b,100b
511 .llong 26b,100b
512 .llong 27b,100b
513 .llong 28b,100b
514 .llong 29b,100b
515 .llong 30b,100b
516 .llong 31b,100b
517 .llong 32b,100b
518 .llong 33b,100b
519 .llong 34b,100b
520 .llong 35b,100b
521 .llong 36b,100b
522 .llong 37b,100b
523 .llong 38b,100b
524 .llong 39b,100b
525 .llong 40b,100b
526 .llong 41b,100b
527 .llong 42b,100b
528 .llong 43b,100b
529 .llong 44b,100b
530 .llong 45b,100b
531 .llong 46b,100b
532 .llong 47b,100b
533 .llong 48b,100b
534 .llong 49b,100b
535 .llong 50b,100b
536 .llong 51b,100b
537 .llong 52b,100b
538 .llong 53b,100b
539 .llong 54b,100b
540 .llong 55b,100b
541 .llong 56b,100b
542 .llong 57b,100b
543 .llong 58b,100b
544 .llong 59b,100b
545 .llong 60b,100b
546 .llong 61b,100b
547 .llong 62b,100b
548 .llong 63b,100b
549 .llong 64b,100b
550 .llong 65b,100b
551 .llong 66b,100b
552 .llong 67b,100b
553 .llong 68b,100b
554 .llong 69b,100b
555 .llong 70b,100b
556 .llong 71b,100b
557 .llong 72b,100b
558 .llong 73b,100b
559 .llong 74b,100b
560 .llong 75b,100b
561 .llong 76b,100b
562 .llong 77b,100b
563 .llong 78b,100b
564 .llong 79b,100b
565 .llong 80b,100b
566 .llong 81b,100b
567 .llong 82b,100b
568 .llong 83b,100b
569 .llong 84b,100b
570 .llong 85b,100b
571 .llong 86b,100b
572 .llong 87b,100b
573 .llong 88b,100b
574 .llong 89b,100b
575 .llong 90b,100b
576 .llong 91b,100b
diff --git a/arch/ppc64/lib/e2a.c b/arch/ppc64/lib/e2a.c
deleted file mode 100644
index d2b834887920..000000000000
--- a/arch/ppc64/lib/e2a.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * arch/ppc64/lib/e2a.c
3 *
4 * EBCDIC to ASCII conversion
5 *
6 * This function moved here from arch/ppc64/kernel/viopath.c
7 *
8 * (C) Copyright 2000-2004 IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#include <linux/module.h>
27
28unsigned char e2a(unsigned char x)
29{
30 switch (x) {
31 case 0xF0:
32 return '0';
33 case 0xF1:
34 return '1';
35 case 0xF2:
36 return '2';
37 case 0xF3:
38 return '3';
39 case 0xF4:
40 return '4';
41 case 0xF5:
42 return '5';
43 case 0xF6:
44 return '6';
45 case 0xF7:
46 return '7';
47 case 0xF8:
48 return '8';
49 case 0xF9:
50 return '9';
51 case 0xC1:
52 return 'A';
53 case 0xC2:
54 return 'B';
55 case 0xC3:
56 return 'C';
57 case 0xC4:
58 return 'D';
59 case 0xC5:
60 return 'E';
61 case 0xC6:
62 return 'F';
63 case 0xC7:
64 return 'G';
65 case 0xC8:
66 return 'H';
67 case 0xC9:
68 return 'I';
69 case 0xD1:
70 return 'J';
71 case 0xD2:
72 return 'K';
73 case 0xD3:
74 return 'L';
75 case 0xD4:
76 return 'M';
77 case 0xD5:
78 return 'N';
79 case 0xD6:
80 return 'O';
81 case 0xD7:
82 return 'P';
83 case 0xD8:
84 return 'Q';
85 case 0xD9:
86 return 'R';
87 case 0xE2:
88 return 'S';
89 case 0xE3:
90 return 'T';
91 case 0xE4:
92 return 'U';
93 case 0xE5:
94 return 'V';
95 case 0xE6:
96 return 'W';
97 case 0xE7:
98 return 'X';
99 case 0xE8:
100 return 'Y';
101 case 0xE9:
102 return 'Z';
103 }
104 return ' ';
105}
106EXPORT_SYMBOL(e2a);
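
The switch above is clear but verbose; the same mapping is often done
with a 256-entry lookup table. A hedged alternative sketch (only the
digits are filled in here; the letters would follow the same pattern):

	static const unsigned char e2a_table[256] = {
		[0xF0] = '0', [0xF1] = '1', [0xF2] = '2', [0xF3] = '3',
		[0xF4] = '4', [0xF5] = '5', [0xF6] = '6', [0xF7] = '7',
		[0xF8] = '8', [0xF9] = '9',
		/* ... 0xC1-0xE9 map to 'A'-'Z' as in the switch ... */
	};

	static unsigned char e2a_alt(unsigned char x)
	{
		/* unset entries are 0, so fall back to ' ' like the switch */
		return e2a_table[x] ? e2a_table[x] : ' ';
	}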
107
108
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c
deleted file mode 100644
index 033643ab69e0..000000000000
--- a/arch/ppc64/lib/locks.c
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * Spin and read/write lock operations.
3 *
4 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
7 * Rework to support virtual processors
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/stringify.h>
20#include <asm/hvcall.h>
21#include <asm/iSeries/HvCall.h>
22
23/* waiting for a spinlock... */
24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
25
26void __spin_yield(raw_spinlock_t *lock)
27{
28 unsigned int lock_value, holder_cpu, yield_count;
29 struct paca_struct *holder_paca;
30
31 lock_value = lock->slock;
32 if (lock_value == 0)
33 return;
34 holder_cpu = lock_value & 0xffff;
35 BUG_ON(holder_cpu >= NR_CPUS);
36 holder_paca = &paca[holder_cpu];
37 yield_count = holder_paca->lppaca.yield_count;
38 if ((yield_count & 1) == 0)
39 return; /* virtual cpu is currently running */
40 rmb();
41 if (lock->slock != lock_value)
42 return; /* something has changed */
43#ifdef CONFIG_PPC_ISERIES
44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
45 ((u64)holder_cpu << 32) | yield_count);
46#else
47 plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
48 yield_count);
49#endif
50}
51
52/*
53 * Waiting for a read lock or a write lock on a rwlock...
54 * This turns out to be the same for read and write locks, since
55 * we only know the holder if it is write-locked.
56 */
57void __rw_yield(raw_rwlock_t *rw)
58{
59 int lock_value;
60 unsigned int holder_cpu, yield_count;
61 struct paca_struct *holder_paca;
62
63 lock_value = rw->lock;
64 if (lock_value >= 0)
65 return; /* no write lock at present */
66 holder_cpu = lock_value & 0xffff;
67 BUG_ON(holder_cpu >= NR_CPUS);
68 holder_paca = &paca[holder_cpu];
69 yield_count = holder_paca->lppaca.yield_count;
70 if ((yield_count & 1) == 0)
71 return; /* virtual cpu is currently running */
72 rmb();
73 if (rw->lock != lock_value)
74 return; /* something has changed */
75#ifdef CONFIG_PPC_ISERIES
76 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
77 ((u64)holder_cpu << 32) | yield_count);
78#else
79 plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
80 yield_count);
81#endif
82}
83#endif
84
85void __raw_spin_unlock_wait(raw_spinlock_t *lock)
86{
87 while (lock->slock) {
88 HMT_low();
89 if (SHARED_PROCESSOR)
90 __spin_yield(lock);
91 }
92 HMT_medium();
93}
94
95EXPORT_SYMBOL(__raw_spin_unlock_wait);
diff --git a/arch/ppc64/lib/memcpy.S b/arch/ppc64/lib/memcpy.S
deleted file mode 100644
index 9ccacdf5bcb9..000000000000
--- a/arch/ppc64/lib/memcpy.S
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * arch/ppc64/lib/memcpy.S
3 *
4 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14 .align 7
15_GLOBAL(memcpy)
16 mtcrf 0x01,r5
17 cmpldi cr1,r5,16
18 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
19 andi. r6,r6,7
20 dcbt 0,r4
21 blt cr1,.Lshort_copy
22 bne .Ldst_unaligned
23.Ldst_aligned:
24 andi. r0,r4,7
25 addi r3,r3,-16
26 bne .Lsrc_unaligned
27 srdi r7,r5,4
28 ld r9,0(r4)
29 addi r4,r4,-8
30 mtctr r7
31 andi. r5,r5,7
32 bf cr7*4+0,2f
33 addi r3,r3,8
34 addi r4,r4,8
35 mr r8,r9
36 blt cr1,3f
371: ld r9,8(r4)
38 std r8,8(r3)
392: ldu r8,16(r4)
40 stdu r9,16(r3)
41 bdnz 1b
423: std r8,8(r3)
43 beqlr
44 addi r3,r3,16
45 ld r9,8(r4)
46.Ldo_tail:
47 bf cr7*4+1,1f
48 rotldi r9,r9,32
49 stw r9,0(r3)
50 addi r3,r3,4
511: bf cr7*4+2,2f
52 rotldi r9,r9,16
53 sth r9,0(r3)
54 addi r3,r3,2
552: bf cr7*4+3,3f
56 rotldi r9,r9,8
57 stb r9,0(r3)
583: blr
59
60.Lsrc_unaligned:
61 srdi r6,r5,3
62 addi r5,r5,-16
63 subf r4,r0,r4
64 srdi r7,r5,4
65 sldi r10,r0,3
66 cmpdi cr6,r6,3
67 andi. r5,r5,7
68 mtctr r7
69 subfic r11,r10,64
70 add r5,r5,r0
71
72 bt cr7*4+0,0f
73
74 ld r9,0(r4) # 3+2n loads, 2+2n stores
75 ld r0,8(r4)
76 sld r6,r9,r10
77 ldu r9,16(r4)
78 srd r7,r0,r11
79 sld r8,r0,r10
80 or r7,r7,r6
81 blt cr6,4f
82 ld r0,8(r4)
83 # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
84 b 2f
85
860: ld r0,0(r4) # 4+2n loads, 3+2n stores
87 ldu r9,8(r4)
88 sld r8,r0,r10
89 addi r3,r3,-8
90 blt cr6,5f
91 ld r0,8(r4)
92 srd r12,r9,r11
93 sld r6,r9,r10
94 ldu r9,16(r4)
95 or r12,r8,r12
96 srd r7,r0,r11
97 sld r8,r0,r10
98 addi r3,r3,16
99 beq cr6,3f
100
101 # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1021: or r7,r7,r6
103 ld r0,8(r4)
104 std r12,8(r3)
1052: srd r12,r9,r11
106 sld r6,r9,r10
107 ldu r9,16(r4)
108 or r12,r8,r12
109 stdu r7,16(r3)
110 srd r7,r0,r11
111 sld r8,r0,r10
112 bdnz 1b
113
1143: std r12,8(r3)
115 or r7,r7,r6
1164: std r7,16(r3)
1175: srd r12,r9,r11
118 or r12,r8,r12
119 std r12,24(r3)
120 beqlr
121 cmpwi cr1,r5,8
122 addi r3,r3,32
123 sld r9,r9,r10
124 ble cr1,.Ldo_tail
125 ld r0,8(r4)
126 srd r7,r0,r11
127 or r9,r7,r9
128 b .Ldo_tail
129
130.Ldst_unaligned:
131 mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
132 subf r5,r6,r5
133 li r7,0
134 cmpldi cr1,r5,16
135 bf cr7*4+3,1f
136 lbz r0,0(r4)
137 stb r0,0(r3)
138 addi r7,r7,1
1391: bf cr7*4+2,2f
140 lhzx r0,r7,r4
141 sthx r0,r7,r3
142 addi r7,r7,2
1432: bf cr7*4+1,3f
144 lwzx r0,r7,r4
145 stwx r0,r7,r3
1463: mtcrf 0x01,r5
147 add r4,r6,r4
148 add r3,r6,r3
149 b .Ldst_aligned
150
151.Lshort_copy:
152 bf cr7*4+0,1f
153 lwz r0,0(r4)
154 lwz r9,4(r4)
155 addi r4,r4,8
156 stw r0,0(r3)
157 stw r9,4(r3)
158 addi r3,r3,8
1591: bf cr7*4+1,2f
160 lwz r0,0(r4)
161 addi r4,r4,4
162 stw r0,0(r3)
163 addi r3,r3,4
1642: bf cr7*4+2,3f
165 lhz r0,0(r4)
166 addi r4,r4,2
167 sth r0,0(r3)
168 addi r3,r3,2
1693: bf cr7*4+3,4f
170 lbz r0,0(r4)
171 stb r0,0(r3)
1724: blr
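
The deleted memcpy above follows a common pattern: copy up to 7 head bytes so the destination becomes 8-byte aligned, stream doublewords through the aligned loop (16 bytes per iteration), then drain a 4/2/1-byte tail selected by the low count bits parked in cr7. A rough C rendering of that control flow, offered as a sketch of the shape only and not a substitute for the tuned assembly:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch of the structure of the assembly above: align the destination
     * to 8 bytes, move the bulk as 64-bit words, then copy the tail. */
    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;
            uint64_t w;

            /* head: 0..7 bytes until d is 8-byte aligned (neg/andi. above) */
            while (((uintptr_t)d & 7) && n) {
                    *d++ = *s++;
                    n--;
            }
            /* bulk: doubleword copies (the ld/std loop) */
            for (; n >= 8; n -= 8, d += 8, s += 8) {
                    memcpy(&w, s, 8);       /* unaligned-safe 64-bit load */
                    memcpy(d, &w, 8);       /* aligned 64-bit store */
            }
            /* tail: remaining 0..7 bytes (the cr7-driven .Ldo_tail) */
            while (n) {
                    *d++ = *s++;
                    n--;
            }
            return dst;
    }

    int main(void)
    {
            char out[32];
            memcpy_sketch(out, "alignment handling example", 27);
            puts(out);
            return 0;
    }
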
diff --git a/arch/ppc64/lib/sstep.c b/arch/ppc64/lib/sstep.c
deleted file mode 100644
index e79123d1485c..000000000000
--- a/arch/ppc64/lib/sstep.c
+++ /dev/null
@@ -1,141 +0,0 @@
1/*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <asm/sstep.h>
14#include <asm/processor.h>
15
16extern char system_call_common[];
17
18/* Bits in SRR1 that are copied from MSR */
19#define MSR_MASK 0xffffffff87c0ffff
20
21/*
22 * Determine whether a conditional branch instruction would branch.
23 */
24static int branch_taken(unsigned int instr, struct pt_regs *regs)
25{
26 unsigned int bo = (instr >> 21) & 0x1f;
27 unsigned int bi;
28
29 if ((bo & 4) == 0) {
30 /* decrement counter */
31 --regs->ctr;
32 if (((bo >> 1) & 1) ^ (regs->ctr == 0))
33 return 0;
34 }
35 if ((bo & 0x10) == 0) {
36 /* check bit from CR */
37 bi = (instr >> 16) & 0x1f;
38 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
39 return 0;
40 }
41 return 1;
42}
43
44/*
45 * Emulate instructions that cause a transfer of control.
46 * Returns 1 if the step was emulated, 0 if not,
47 * or -1 if the instruction is one that should not be stepped,
48 * such as an rfid, or a mtmsrd that would clear MSR_RI.
49 */
50int emulate_step(struct pt_regs *regs, unsigned int instr)
51{
52 unsigned int opcode, rd;
53 unsigned long int imm;
54
55 opcode = instr >> 26;
56 switch (opcode) {
57 case 16: /* bc */
58 imm = (signed short)(instr & 0xfffc);
59 if ((instr & 2) == 0)
60 imm += regs->nip;
61 regs->nip += 4;
62 if ((regs->msr & MSR_SF) == 0)
63 regs->nip &= 0xffffffffUL;
64 if (instr & 1)
65 regs->link = regs->nip;
66 if (branch_taken(instr, regs))
67 regs->nip = imm;
68 return 1;
69 case 17: /* sc */
70 /*
71 * N.B. this uses knowledge about how the syscall
72 * entry code works. If that is changed, this will
73 * need to be changed also.
74 */
75 regs->gpr[9] = regs->gpr[13];
76 regs->gpr[11] = regs->nip + 4;
77 regs->gpr[12] = regs->msr & MSR_MASK;
78 regs->gpr[13] = (unsigned long) get_paca();
79 regs->nip = (unsigned long) &system_call_common;
80 regs->msr = MSR_KERNEL;
81 return 1;
82 case 18: /* b */
83 imm = instr & 0x03fffffc;
84 if (imm & 0x02000000)
85 imm -= 0x04000000;
86 if ((instr & 2) == 0)
87 imm += regs->nip;
88 if (instr & 1) {
89 regs->link = regs->nip + 4;
90 if ((regs->msr & MSR_SF) == 0)
91 regs->link &= 0xffffffffUL;
92 }
93 if ((regs->msr & MSR_SF) == 0)
94 imm &= 0xffffffffUL;
95 regs->nip = imm;
96 return 1;
97 case 19:
98 switch (instr & 0x7fe) {
99 case 0x20: /* bclr */
100 case 0x420: /* bcctr */
101 imm = (instr & 0x400)? regs->ctr: regs->link;
102 regs->nip += 4;
103 if ((regs->msr & MSR_SF) == 0) {
104 regs->nip &= 0xffffffffUL;
105 imm &= 0xffffffffUL;
106 }
107 if (instr & 1)
108 regs->link = regs->nip;
109 if (branch_taken(instr, regs))
110 regs->nip = imm;
111 return 1;
112 case 0x24: /* rfid, scary */
113 return -1;
114 }
115 case 31:
116 rd = (instr >> 21) & 0x1f;
117 switch (instr & 0x7fe) {
118 case 0xa6: /* mfmsr */
119 regs->gpr[rd] = regs->msr & MSR_MASK;
120 regs->nip += 4;
121 if ((regs->msr & MSR_SF) == 0)
122 regs->nip &= 0xffffffffUL;
123 return 1;
124 case 0x164: /* mtmsrd */
125 /* only MSR_EE and MSR_RI get changed if bit 15 set */
126 /* mtmsrd doesn't change MSR_HV and MSR_ME */
127 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
128 imm = (regs->msr & MSR_MASK & ~imm)
129 | (regs->gpr[rd] & imm);
130 if ((imm & MSR_RI) == 0)
131 /* can't step mtmsrd that would clear MSR_RI */
132 return -1;
133 regs->msr = imm;
134 regs->nip += 4;
135 if ((imm & MSR_SF) == 0)
136 regs->nip &= 0xffffffffUL;
137 return 1;
138 }
139 }
140 return 0;
141}
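
In the opcode 18 (b) case above, the 26-bit LI field is sign-extended by hand: when bit 0x02000000 is set, subtracting 0x04000000 yields the negative displacement. A self-contained check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the 26-bit branch displacement exactly as the
     * emulate_step() code above does for opcode 18 (b/bl/ba/bla). */
    static long branch_displacement(uint32_t instr)
    {
            unsigned long imm = instr & 0x03fffffc;  /* LI field, low 2 bits 0 */
            if (imm & 0x02000000)                    /* sign bit of the field */
                    imm -= 0x04000000;
            return (long)imm;
    }

    int main(void)
    {
            assert(branch_displacement(0x48000010) == 16);   /* b +16 */
            assert(branch_displacement(0x4bfffff0) == -16);  /* b -16 */
            return 0;
    }
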
diff --git a/arch/ppc64/lib/strcase.c b/arch/ppc64/lib/strcase.c
deleted file mode 100644
index e84f243368c0..000000000000
--- a/arch/ppc64/lib/strcase.c
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/ctype.h>
10
11int strcasecmp(const char *s1, const char *s2)
12{
13 int c1, c2;
14
15 do {
16 c1 = tolower(*s1++);
17 c2 = tolower(*s2++);
18 } while (c1 == c2 && c1 != 0);
19 return c1 - c2;
20}
21
22int strncasecmp(const char *s1, const char *s2, int n)
23{
24 int c1, c2;
25
26 do {
27 c1 = tolower(*s1++);
28 c2 = tolower(*s2++);
29 } while ((--n > 0) && c1 == c2 && c1 != 0);
30 return c1 - c2;
31}
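
Both routines return the difference of the first mismatching characters after tolower(), so the sign of the result orders the strings; note that this strncasecmp() takes a plain int count rather than size_t. A trivial demonstration (the unsigned char cast is an addition for portable userspace ctype use, not part of the kernel code above):

    #include <assert.h>
    #include <ctype.h>

    /* Same logic as the deleted strcasecmp() above, repeated for the demo. */
    static int strcasecmp_demo(const char *s1, const char *s2)
    {
            int c1, c2;

            do {
                    c1 = tolower(*(const unsigned char *)s1++);
                    c2 = tolower(*(const unsigned char *)s2++);
            } while (c1 == c2 && c1 != 0);
            return c1 - c2;
    }

    int main(void)
    {
            assert(strcasecmp_demo("PowerPC", "powerpc") == 0);
            assert(strcasecmp_demo("abc", "abd") < 0);
            return 0;
    }
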
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
index 813587e5c2ec..e21a0038a4d6 100644
--- a/arch/ppc64/lib/string.S
+++ b/arch/ppc64/lib/string.S
@@ -65,112 +65,6 @@ _GLOBAL(strlen)
65 subf r3,r3,r4 65 subf r3,r3,r4
66 blr 66 blr
67 67
68_GLOBAL(memset)
69 neg r0,r3
70 rlwimi r4,r4,8,16,23
71 andi. r0,r0,7 /* # bytes to be 8-byte aligned */
72 rlwimi r4,r4,16,0,15
73 cmplw cr1,r5,r0 /* do we get that far? */
74 rldimi r4,r4,32,0
75 mtcrf 1,r0
76 mr r6,r3
77 blt cr1,8f
78 beq+ 3f /* if already 8-byte aligned */
79 subf r5,r0,r5
80 bf 31,1f
81 stb r4,0(r6)
82 addi r6,r6,1
831: bf 30,2f
84 sth r4,0(r6)
85 addi r6,r6,2
862: bf 29,3f
87 stw r4,0(r6)
88 addi r6,r6,4
893: srdi. r0,r5,6
90 clrldi r5,r5,58
91 mtctr r0
92 beq 5f
934: std r4,0(r6)
94 std r4,8(r6)
95 std r4,16(r6)
96 std r4,24(r6)
97 std r4,32(r6)
98 std r4,40(r6)
99 std r4,48(r6)
100 std r4,56(r6)
101 addi r6,r6,64
102 bdnz 4b
1035: srwi. r0,r5,3
104 clrlwi r5,r5,29
105 mtcrf 1,r0
106 beq 8f
107 bf 29,6f
108 std r4,0(r6)
109 std r4,8(r6)
110 std r4,16(r6)
111 std r4,24(r6)
112 addi r6,r6,32
1136: bf 30,7f
114 std r4,0(r6)
115 std r4,8(r6)
116 addi r6,r6,16
1177: bf 31,8f
118 std r4,0(r6)
119 addi r6,r6,8
1208: cmpwi r5,0
121 mtcrf 1,r5
122 beqlr+
123 bf 29,9f
124 stw r4,0(r6)
125 addi r6,r6,4
1269: bf 30,10f
127 sth r4,0(r6)
128 addi r6,r6,2
12910: bflr 31
130 stb r4,0(r6)
131 blr
132
133_GLOBAL(memmove)
134 cmplw 0,r3,r4
135 bgt .backwards_memcpy
136 b .memcpy
137
138_GLOBAL(backwards_memcpy)
139 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
140 add r6,r3,r5
141 add r4,r4,r5
142 beq 2f
143 andi. r0,r6,3
144 mtctr r7
145 bne 5f
1461: lwz r7,-4(r4)
147 lwzu r8,-8(r4)
148 stw r7,-4(r6)
149 stwu r8,-8(r6)
150 bdnz 1b
151 andi. r5,r5,7
1522: cmplwi 0,r5,4
153 blt 3f
154 lwzu r0,-4(r4)
155 subi r5,r5,4
156 stwu r0,-4(r6)
1573: cmpwi 0,r5,0
158 beqlr
159 mtctr r5
1604: lbzu r0,-1(r4)
161 stbu r0,-1(r6)
162 bdnz 4b
163 blr
1645: mtctr r0
1656: lbzu r7,-1(r4)
166 stbu r7,-1(r6)
167 bdnz 6b
168 subf r5,r0,r5
169 rlwinm. r7,r5,32-3,3,31
170 beq 2b
171 mtctr r7
172 b 1b
173
174_GLOBAL(memcmp) 68_GLOBAL(memcmp)
175 cmpwi 0,r5,0 69 cmpwi 0,r5,0
176 ble- 2f 70 ble- 2f
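
The deleted memset begins by replicating the fill byte across all eight bytes of r4 with rlwimi/rlwimi/rldimi before entering its block of std instructions. The same splat expressed in C:

    #include <assert.h>
    #include <stdint.h>

    /* Replicate the low byte across a 64-bit word, as the deleted
     * memset does before its doubleword store loops. */
    static uint64_t splat8(uint8_t c)
    {
            uint64_t v = c;

            v |= v << 8;    /* rlwimi r4,r4,8,16,23 */
            v |= v << 16;   /* rlwimi r4,r4,16,0,15 */
            v |= v << 32;   /* rldimi r4,r4,32,0    */
            return v;
    }

    int main(void)
    {
            assert(splat8(0xab) == 0xabababababababababULL);
            return 0;
    }
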
diff --git a/arch/ppc64/lib/usercopy.c b/arch/ppc64/lib/usercopy.c
deleted file mode 100644
index 5eea6f3c1e03..000000000000
--- a/arch/ppc64/lib/usercopy.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * Functions which are too large to be inlined.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10#include <asm/uaccess.h>
11
12unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
13{
14 if (likely(access_ok(VERIFY_READ, from, n)))
15 n = __copy_from_user(to, from, n);
16 else
17 memset(to, 0, n);
18 return n;
19}
20
21unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
22{
23 if (likely(access_ok(VERIFY_WRITE, to, n)))
24 n = __copy_to_user(to, from, n);
25 return n;
26}
27
28unsigned long copy_in_user(void __user *to, const void __user *from,
29 unsigned long n)
30{
31 might_sleep();
32 if (likely(access_ok(VERIFY_READ, from, n) &&
33 access_ok(VERIFY_WRITE, to, n)))
34 n = __copy_tofrom_user(to, from, n);
35 return n;
36}
37
38EXPORT_SYMBOL(copy_from_user);
39EXPORT_SYMBOL(copy_to_user);
40EXPORT_SYMBOL(copy_in_user);
41
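
All three wrappers return the number of bytes left uncopied, and copy_from_user() additionally zeroes the destination when access_ok() rejects the source, so callers can never observe stale data. A toy model of that contract in plain C, outside the kernel, with names invented for the demo:

    #include <stdio.h>
    #include <string.h>

    /* Toy model of the convention above: return bytes NOT copied, and
     * clear the destination on a rejected source, like copy_from_user(). */
    static unsigned long copy_checked(void *to, const void *from,
                                      unsigned long n, int src_ok)
    {
            if (src_ok) {
                    memcpy(to, from, n);
                    return 0;
            }
            memset(to, 0, n);       /* never leak uninitialised bytes */
            return n;               /* everything was left uncopied */
    }

    int main(void)
    {
            char buf[8];
            unsigned long left = copy_checked(buf, "data", 5, 0);

            printf("uncopied: %lu, buf[0]=%d\n", left, buf[0]);  /* 5, 0 */
            return 0;
    }
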
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
deleted file mode 100644
index 3695d00d347f..000000000000
--- a/arch/ppc64/mm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5EXTRA_CFLAGS += -mno-minimal-toc
6
7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
8 slb_low.o slb.o stab.o mmap.o
9obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
deleted file mode 100644
index be3f25cf3e9f..000000000000
--- a/arch/ppc64/mm/fault.c
+++ /dev/null
@@ -1,333 +0,0 @@
1/*
2 * arch/ppc/mm/fault.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/mm/fault.c"
8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
9 *
10 * Modified by Cort Dougan and Paul Mackerras.
11 *
12 * Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/config.h>
21#include <linux/signal.h>
22#include <linux/sched.h>
23#include <linux/kernel.h>
24#include <linux/errno.h>
25#include <linux/string.h>
26#include <linux/types.h>
27#include <linux/mman.h>
28#include <linux/mm.h>
29#include <linux/interrupt.h>
30#include <linux/smp_lock.h>
31#include <linux/module.h>
32#include <linux/kprobes.h>
33
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/mmu.h>
37#include <asm/mmu_context.h>
38#include <asm/system.h>
39#include <asm/uaccess.h>
40#include <asm/kdebug.h>
41#include <asm/siginfo.h>
42
43/*
44 * Check whether the instruction at regs->nip is a store using
45 * an update addressing form which will update r1.
46 */
47static int store_updates_sp(struct pt_regs *regs)
48{
49 unsigned int inst;
50
51 if (get_user(inst, (unsigned int __user *)regs->nip))
52 return 0;
53 /* check for 1 in the rA field */
54 if (((inst >> 16) & 0x1f) != 1)
55 return 0;
56 /* check major opcode */
57 switch (inst >> 26) {
58 case 37: /* stwu */
59 case 39: /* stbu */
60 case 45: /* sthu */
61 case 53: /* stfsu */
62 case 55: /* stfdu */
63 return 1;
64 case 62: /* std or stdu */
65 return (inst & 3) == 1;
66 case 31:
67 /* check minor opcode */
68 switch ((inst >> 1) & 0x3ff) {
69 case 181: /* stdux */
70 case 183: /* stwux */
71 case 247: /* stbux */
72 case 439: /* sthux */
73 case 695: /* stfsux */
74 case 759: /* stfdux */
75 return 1;
76 }
77 }
78 return 0;
79}
80
81static void do_dabr(struct pt_regs *regs, unsigned long error_code)
82{
83 siginfo_t info;
84
85 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
86 11, SIGSEGV) == NOTIFY_STOP)
87 return;
88
89 if (debugger_dabr_match(regs))
90 return;
91
92 /* Clear the DABR */
93 set_dabr(0);
94
95 /* Deliver the signal to userspace */
96 info.si_signo = SIGTRAP;
97 info.si_errno = 0;
98 info.si_code = TRAP_HWBKPT;
99 info.si_addr = (void __user *)regs->nip;
100 force_sig_info(SIGTRAP, &info, current);
101}
102
103/*
104 * The error_code parameter is
105 * - DSISR for a non-SLB data access fault,
106 * - SRR1 & 0x08000000 for a non-SLB instruction access fault
107 * - 0 any SLB fault.
108 * The return value is 0 if the fault was handled, or the signal
109 * number if this is a kernel fault that can't be handled here.
110 */
111int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
112 unsigned long error_code)
113{
114 struct vm_area_struct * vma;
115 struct mm_struct *mm = current->mm;
116 siginfo_t info;
117 unsigned long code = SEGV_MAPERR;
118 unsigned long is_write = error_code & DSISR_ISSTORE;
119 unsigned long trap = TRAP(regs);
120 unsigned long is_exec = trap == 0x400;
121
122 BUG_ON((trap == 0x380) || (trap == 0x480));
123
124 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
125 11, SIGSEGV) == NOTIFY_STOP)
126 return 0;
127
128 if (trap == 0x300) {
129 if (debugger_fault_handler(regs))
130 return 0;
131 }
132
133 /* On a kernel SLB miss we can only check for a valid exception entry */
134 if (!user_mode(regs) && (address >= TASK_SIZE))
135 return SIGSEGV;
136
137 if (error_code & DSISR_DABRMATCH) {
138 do_dabr(regs, error_code);
139 return 0;
140 }
141
142 if (in_atomic() || mm == NULL) {
143 if (!user_mode(regs))
144 return SIGSEGV;
145 /* in_atomic() in user mode is really bad,
146 as is current->mm == NULL. */
147 printk(KERN_EMERG "Page fault in user mode with"
148 "in_atomic() = %d mm = %p\n", in_atomic(), mm);
149 printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
150 regs->nip, regs->msr);
151 die("Weird page fault", regs, SIGSEGV);
152 }
153
154 /* When running in the kernel we expect faults to occur only to
155 * addresses in user space. All other faults represent errors in the
156 * kernel and should generate an OOPS. Unfortunately, in the case of an
157 * erroneous fault occurring in a code path which already holds mmap_sem
158 * we will deadlock attempting to validate the fault against the
159 * address space. Luckily the kernel only validly references user
160 * space from well defined areas of code, which are listed in the
161 * exceptions table.
162 *
163 * As the vast majority of faults will be valid we will only perform
164 * the source reference check when there is a possibility of a deadlock.
165 * Attempt to lock the address space, if we cannot we then validate the
166 * source. If this is invalid we can skip the address space check,
167 * thus avoiding the deadlock.
168 */
169 if (!down_read_trylock(&mm->mmap_sem)) {
170 if (!user_mode(regs) && !search_exception_tables(regs->nip))
171 goto bad_area_nosemaphore;
172
173 down_read(&mm->mmap_sem);
174 }
175
176 vma = find_vma(mm, address);
177 if (!vma)
178 goto bad_area;
179
180 if (vma->vm_start <= address) {
181 goto good_area;
182 }
183 if (!(vma->vm_flags & VM_GROWSDOWN))
184 goto bad_area;
185
186 /*
187 * N.B. The POWER/Open ABI allows programs to access up to
188 * 288 bytes below the stack pointer.
189 * The kernel signal delivery code writes up to about 1.5kB
190 * below the stack pointer (r1) before decrementing it.
191 * The exec code can write slightly over 640kB to the stack
192 * before setting the user r1. Thus we allow the stack to
193 * expand to 1MB without further checks.
194 */
195 if (address + 0x100000 < vma->vm_end) {
196 /* get user regs even if this fault is in kernel mode */
197 struct pt_regs *uregs = current->thread.regs;
198 if (uregs == NULL)
199 goto bad_area;
200
201 /*
202 * A user-mode access to an address a long way below
203 * the stack pointer is only valid if the instruction
204 * is one which would update the stack pointer to the
205 * address accessed if the instruction completed,
206 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
207 * (or the byte, halfword, float or double forms).
208 *
209 * If we don't check this then any write to the area
210 * between the last mapped region and the stack will
211 * expand the stack rather than segfaulting.
212 */
213 if (address + 2048 < uregs->gpr[1]
214 && (!user_mode(regs) || !store_updates_sp(regs)))
215 goto bad_area;
216 }
217
218 if (expand_stack(vma, address))
219 goto bad_area;
220
221good_area:
222 code = SEGV_ACCERR;
223
224 if (is_exec) {
225 /* protection fault */
226 if (error_code & DSISR_PROTFAULT)
227 goto bad_area;
228 if (!(vma->vm_flags & VM_EXEC))
229 goto bad_area;
230 /* a write */
231 } else if (is_write) {
232 if (!(vma->vm_flags & VM_WRITE))
233 goto bad_area;
234 /* a read */
235 } else {
236 if (!(vma->vm_flags & VM_READ))
237 goto bad_area;
238 }
239
240 survive:
241 /*
242 * If for any reason at all we couldn't handle the fault,
243 * make sure we exit gracefully rather than endlessly redo
244 * the fault.
245 */
246 switch (handle_mm_fault(mm, vma, address, is_write)) {
247
248 case VM_FAULT_MINOR:
249 current->min_flt++;
250 break;
251 case VM_FAULT_MAJOR:
252 current->maj_flt++;
253 break;
254 case VM_FAULT_SIGBUS:
255 goto do_sigbus;
256 case VM_FAULT_OOM:
257 goto out_of_memory;
258 default:
259 BUG();
260 }
261
262 up_read(&mm->mmap_sem);
263 return 0;
264
265bad_area:
266 up_read(&mm->mmap_sem);
267
268bad_area_nosemaphore:
269 /* User mode accesses cause a SIGSEGV */
270 if (user_mode(regs)) {
271 info.si_signo = SIGSEGV;
272 info.si_errno = 0;
273 info.si_code = code;
274 info.si_addr = (void __user *) address;
275 force_sig_info(SIGSEGV, &info, current);
276 return 0;
277 }
278
279 if (trap == 0x400 && (error_code & DSISR_PROTFAULT)
280 && printk_ratelimit())
281 printk(KERN_CRIT "kernel tried to execute NX-protected"
282 " page (%lx) - exploit attempt? (uid: %d)\n",
283 address, current->uid);
284
285 return SIGSEGV;
286
287/*
288 * We ran out of memory, or some other thing happened to us that made
289 * us unable to handle the page fault gracefully.
290 */
291out_of_memory:
292 up_read(&mm->mmap_sem);
293 if (current->pid == 1) {
294 yield();
295 down_read(&mm->mmap_sem);
296 goto survive;
297 }
298 printk("VM: killing process %s\n", current->comm);
299 if (user_mode(regs))
300 do_exit(SIGKILL);
301 return SIGKILL;
302
303do_sigbus:
304 up_read(&mm->mmap_sem);
305 if (user_mode(regs)) {
306 info.si_signo = SIGBUS;
307 info.si_errno = 0;
308 info.si_code = BUS_ADRERR;
309 info.si_addr = (void __user *)address;
310 force_sig_info(SIGBUS, &info, current);
311 return 0;
312 }
313 return SIGBUS;
314}
315
316/*
317 * bad_page_fault is called when we have a bad access from the kernel.
318 * It is called from do_page_fault above and from some of the procedures
319 * in traps.c.
320 */
321void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
322{
323 const struct exception_table_entry *entry;
324
325 /* Are we prepared to handle this fault? */
326 if ((entry = search_exception_tables(regs->nip)) != NULL) {
327 regs->nip = entry->fixup;
328 return;
329 }
330
331 /* kernel has accessed a bad area */
332 die("Kernel access of bad area", regs, sig);
333}
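
The stack-expansion path above permits faults up to 1MB below vm_end, but an access more than 2048 bytes below the user r1 is accepted only when the faulting instruction is a store-with-update through r1 (store_updates_sp). That guard, condensed into a hypothetical predicate for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Condenses the guard in do_page_fault() above: is a fault at 'addr'
     * acceptable stack growth, given the user stack pointer 'sp' and
     * whether the faulting instruction updates r1 (stwu and friends)? */
    static bool stack_access_ok(unsigned long addr, unsigned long sp,
                                bool user_mode, bool updates_sp)
    {
            if (addr + 2048 >= sp)
                    return true;            /* within the permitted slop */
            return user_mode && updates_sp; /* far below r1: need stwu-style insn */
    }

    int main(void)
    {
            printf("%d\n", stack_access_ok(0x1000, 0x1200, true, false)); /* 1 */
            printf("%d\n", stack_access_ok(0x1000, 0x9000, true, false)); /* 0 */
            return 0;
    }
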
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
deleted file mode 100644
index ee5a5d36bfa8..000000000000
--- a/arch/ppc64/mm/hash_low.S
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * ppc64 MMU hashtable management routines
3 *
4 * (c) Copyright IBM Corp. 2003
5 *
6 * Maintained by: Benjamin Herrenschmidt
7 * <benh@kernel.crashing.org>
8 *
9 * This file is covered by the GNU Public Licence v2 as
10 * described in the kernel's COPYING file.
11 */
12
13#include <asm/processor.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/page.h>
17#include <asm/types.h>
18#include <asm/ppc_asm.h>
19#include <asm/asm-offsets.h>
20#include <asm/cputable.h>
21
22 .text
23
24/*
25 * Stackframe:
26 *
27 * +-> Back chain (SP + 256)
28 * | General register save area (SP + 112)
29 * | Parameter save area (SP + 48)
30 * | TOC save area (SP + 40)
31 * | link editor doubleword (SP + 32)
32 * | compiler doubleword (SP + 24)
33 * | LR save area (SP + 16)
34 * | CR save area (SP + 8)
35 * SP ---> +-- Back chain (SP + 0)
36 */
37#define STACKFRAMESIZE 256
38
39/* Save parameters offsets */
40#define STK_PARM(i) (STACKFRAMESIZE + 48 + ((i)-3)*8)
41
42/* Save non-volatile offsets */
43#define STK_REG(i) (112 + ((i)-14)*8)
44
45/*
46 * _hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
47 * pte_t *ptep, unsigned long trap, int local)
48 *
49 * Adds a page to the hash table. This is the non-LPAR version for now
50 */
51
52_GLOBAL(__hash_page)
53 mflr r0
54 std r0,16(r1)
55 stdu r1,-STACKFRAMESIZE(r1)
56 /* Save all params that we need after a function call */
57 std r6,STK_PARM(r6)(r1)
58 std r8,STK_PARM(r8)(r1)
59
60 /* Add _PAGE_PRESENT to access */
61 ori r4,r4,_PAGE_PRESENT
62
63 /* Save non-volatile registers.
64 * r31 will hold "old PTE"
65 * r30 is "new PTE"
66 * r29 is "va"
67 * r28 is a hash value
68 * r27 is hashtab mask (maybe dynamically patched instead?)
69 */
70 std r27,STK_REG(r27)(r1)
71 std r28,STK_REG(r28)(r1)
72 std r29,STK_REG(r29)(r1)
73 std r30,STK_REG(r30)(r1)
74 std r31,STK_REG(r31)(r1)
75
76 /* Step 1:
77 *
78 * Check permissions, atomically mark the linux PTE busy
79 * and hashed.
80 */
811:
82 ldarx r31,0,r6
83 /* Check access rights (access & ~(pte_val(*ptep))) */
84 andc. r0,r4,r31
85 bne- htab_wrong_access
86 /* Check if PTE is busy */
87 andi. r0,r31,_PAGE_BUSY
88 /* If so, just bail out and refault if needed. Someone else
89 * is changing this PTE anyway and might hash it.
90 */
91 bne- bail_ok
92 /* Prepare new PTE value (turn access RW into DIRTY, then
93 * add BUSY,HASHPTE and ACCESSED)
94 */
95 rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
96 or r30,r30,r31
97 ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
98 /* Write the linux PTE atomically (setting busy) */
99 stdcx. r30,0,r6
100 bne- 1b
101 isync
102
103 /* Step 2:
104 *
105 * Insert/Update the HPTE in the hash table. At this point,
106 * r4 (access) is re-useable, we use it for the new HPTE flags
107 */
108
109 /* Calc va and put it in r29 */
110 rldicr r29,r5,28,63-28
111 rldicl r3,r3,0,36
112 or r29,r3,r29
113
114 /* Calculate hash value for primary slot and store it in r28 */
115 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
116 rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
117 xor r28,r5,r0
118
119 /* Convert linux PTE bits into HW equivalents */
120 andi. r3,r30,0x1fe /* Get basic set of flags */
121 xori r3,r3,HW_NO_EXEC /* _PAGE_EXEC -> NOEXEC */
122 rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */
123 rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */
124 and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
125 andc r0,r30,r0 /* r0 = pte & ~r0 */
126 rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
127
128 /* We eventually do the icache sync here (maybe inline that
129 * code rather than call a C function...)
130 */
131BEGIN_FTR_SECTION
132 mr r4,r30
133 mr r5,r7
134 bl .hash_page_do_lazy_icache
135END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
136
137 /* At this point, r3 contains new PP bits, save them in
138 * place of "access" in the param area (sic)
139 */
140 std r3,STK_PARM(r4)(r1)
141
142 /* Get htab_hash_mask */
143 ld r4,htab_hash_mask@got(2)
144 ld r27,0(r4) /* htab_hash_mask -> r27 */
145
146 /* Check if we may already be in the hashtable, in this case, we
147 * go to out-of-line code to try to modify the HPTE
148 */
149 andi. r0,r31,_PAGE_HASHPTE
150 bne htab_modify_pte
151
152htab_insert_pte:
153 /* Clear hpte bits in new pte (we also clear BUSY btw) and
154 * add _PAGE_HASHPTE
155 */
156 lis r0,_PAGE_HPTEFLAGS@h
157 ori r0,r0,_PAGE_HPTEFLAGS@l
158 andc r30,r30,r0
159 ori r30,r30,_PAGE_HASHPTE
160
161 /* page number in r5 */
162 rldicl r5,r31,64-PTE_SHIFT,PTE_SHIFT
163
164 /* Calculate primary group hash */
165 and r0,r28,r27
166 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
167
168 /* Call ppc_md.hpte_insert */
169 ld r7,STK_PARM(r4)(r1) /* Retrieve new pp bits */
170 mr r4,r29 /* Retrieve va */
171 li r6,0 /* no vflags */
172_GLOBAL(htab_call_hpte_insert1)
173 bl . /* Will be patched by htab_finish_init() */
174 cmpdi 0,r3,0
175 bge htab_pte_insert_ok /* Insertion successful */
176 cmpdi 0,r3,-2 /* Critical failure */
177 beq- htab_pte_insert_failure
178
179 /* Now try secondary slot */
180
181 /* page number in r5 */
182 rldicl r5,r31,64-PTE_SHIFT,PTE_SHIFT
183
184 /* Calculate secondary group hash */
185 andc r0,r27,r28
186 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
187
188 /* Call ppc_md.hpte_insert */
189 ld r7,STK_PARM(r4)(r1) /* Retrieve new pp bits */
190 mr r4,r29 /* Retrieve va */
191 li r6,HPTE_V_SECONDARY@l /* secondary slot */
192_GLOBAL(htab_call_hpte_insert2)
193 bl . /* Will be patched by htab_finish_init() */
194 cmpdi 0,r3,0
195 bge+ htab_pte_insert_ok /* Insertion successful */
196 cmpdi 0,r3,-2 /* Critical failure */
197 beq- htab_pte_insert_failure
198
199 /* Both are full, we need to evict something */
200 mftb r0
201 /* Pick a random group based on TB */
202 andi. r0,r0,1
203 mr r5,r28
204 bne 2f
205 not r5,r5
2062: and r0,r5,r27
207 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
208 /* Call ppc_md.hpte_remove */
209_GLOBAL(htab_call_hpte_remove)
210 bl . /* Will be patched by htab_finish_init() */
211
212 /* Try all again */
213 b htab_insert_pte
214
215bail_ok:
216 li r3,0
217 b bail
218
219htab_pte_insert_ok:
220 /* Insert slot number & secondary bit in PTE */
221 rldimi r30,r3,12,63-15
222
223 /* Write out the PTE with a normal write
224 * (maybe add eieio may be good still ?)
225 */
226htab_write_out_pte:
227 ld r6,STK_PARM(r6)(r1)
228 std r30,0(r6)
229 li r3, 0
230bail:
231 ld r27,STK_REG(r27)(r1)
232 ld r28,STK_REG(r28)(r1)
233 ld r29,STK_REG(r29)(r1)
234 ld r30,STK_REG(r30)(r1)
235 ld r31,STK_REG(r31)(r1)
236 addi r1,r1,STACKFRAMESIZE
237 ld r0,16(r1)
238 mtlr r0
239 blr
240
241htab_modify_pte:
242 /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
243 mr r4,r3
244 rlwinm r3,r31,32-12,29,31
245
246 /* Secondary group? If yes, get an inverted hash value */
247 mr r5,r28
248 andi. r0,r31,_PAGE_SECONDARY
249 beq 1f
250 not r5,r5
2511:
252 /* Calculate proper slot value for ppc_md.hpte_updatepp */
253 and r0,r5,r27
254 rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */
255 add r3,r0,r3 /* add slot idx */
256
257 /* Call ppc_md.hpte_updatepp */
258 mr r5,r29 /* va */
259 li r6,0 /* large is 0 */
260 ld r7,STK_PARM(r8)(r1) /* get "local" param */
261_GLOBAL(htab_call_hpte_updatepp)
262 bl . /* Will be patched by htab_finish_init() */
263
264 /* if we failed because typically the HPTE wasn't really here
265 * we try an insertion.
266 */
267 cmpdi 0,r3,-1
268 beq- htab_insert_pte
269
270 /* Clear the BUSY bit and Write out the PTE */
271 li r0,_PAGE_BUSY
272 andc r30,r30,r0
273 b htab_write_out_pte
274
275htab_wrong_access:
276 /* Bail out clearing reservation */
277 stdcx. r31,0,r6
278 li r3,1
279 b bail
280
281htab_pte_insert_failure:
282 /* Bail out restoring old PTE */
283 ld r6,STK_PARM(r6)(r1)
284 std r31,0(r6)
285 li r3,-1
286 b bail
287
288
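
Step 1 of __hash_page is a load-reserve/store-conditional loop: reread the Linux PTE, bail out if _PAGE_BUSY is already set, otherwise atomically set BUSY, ACCESSED and HASHPTE. The same protocol sketched with GCC atomics; the bit positions are placeholders, not the real _PAGE_* values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_BUSY       (1UL << 0)  /* placeholder bit positions */
    #define PAGE_ACCESSED   (1UL << 1)
    #define PAGE_HASHPTE    (1UL << 2)

    /* Equivalent of the ldarx/stdcx. loop in __hash_page: atomically
     * mark the PTE busy+hashed, or report that someone else is on it. */
    static int pte_try_mark_busy(uint64_t *pte)
    {
            uint64_t old, new;

            do {
                    old = __atomic_load_n(pte, __ATOMIC_RELAXED);
                    if (old & PAGE_BUSY)
                            return 0;   /* bail_ok path: just refault later */
                    new = old | PAGE_BUSY | PAGE_ACCESSED | PAGE_HASHPTE;
            } while (!__atomic_compare_exchange_n(pte, &old, new, 0,
                                                  __ATOMIC_ACQUIRE,
                                                  __ATOMIC_RELAXED));
            return 1;
    }

    int main(void)
    {
            uint64_t pte = 0;
            int first = pte_try_mark_busy(&pte);
            int second = pte_try_mark_busy(&pte);

            printf("%d %d\n", first, second);   /* prints: 1 0 */
            return 0;
    }
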
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
deleted file mode 100644
index bfd385b7713c..000000000000
--- a/arch/ppc64/mm/hash_native.c
+++ /dev/null
@@ -1,453 +0,0 @@
1/*
2 * native hashtable management.
3 *
4 * SMP scalability work:
5 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/spinlock.h>
13#include <linux/bitops.h>
14#include <linux/threads.h>
15#include <linux/smp.h>
16
17#include <asm/abs_addr.h>
18#include <asm/machdep.h>
19#include <asm/mmu.h>
20#include <asm/mmu_context.h>
21#include <asm/pgtable.h>
22#include <asm/tlbflush.h>
23#include <asm/tlb.h>
24#include <asm/cputable.h>
25
26#define HPTE_LOCK_BIT 3
27
28static DEFINE_SPINLOCK(native_tlbie_lock);
29
30static inline void native_lock_hpte(hpte_t *hptep)
31{
32 unsigned long *word = &hptep->v;
33
34 while (1) {
35 if (!test_and_set_bit(HPTE_LOCK_BIT, word))
36 break;
37 while (test_bit(HPTE_LOCK_BIT, word))
38 cpu_relax();
39 }
40}
41
42static inline void native_unlock_hpte(hpte_t *hptep)
43{
44 unsigned long *word = &hptep->v;
45
46 asm volatile("lwsync":::"memory");
47 clear_bit(HPTE_LOCK_BIT, word);
48}
49
50long native_hpte_insert(unsigned long hpte_group, unsigned long va,
51 unsigned long prpn, unsigned long vflags,
52 unsigned long rflags)
53{
54 hpte_t *hptep = htab_address + hpte_group;
55 unsigned long hpte_v, hpte_r;
56 int i;
57
58 for (i = 0; i < HPTES_PER_GROUP; i++) {
59 if (! (hptep->v & HPTE_V_VALID)) {
60 /* retry with lock held */
61 native_lock_hpte(hptep);
62 if (! (hptep->v & HPTE_V_VALID))
63 break;
64 native_unlock_hpte(hptep);
65 }
66
67 hptep++;
68 }
69
70 if (i == HPTES_PER_GROUP)
71 return -1;
72
73 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
74 if (vflags & HPTE_V_LARGE)
75 va &= ~(1UL << HPTE_V_AVPN_SHIFT);
76 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
77
78 hptep->r = hpte_r;
79 /* Guarantee the second dword is visible before the valid bit */
80 __asm__ __volatile__ ("eieio" : : : "memory");
81 /*
82 * Now set the first dword including the valid bit
83 * NOTE: this also unlocks the hpte
84 */
85 hptep->v = hpte_v;
86
87 __asm__ __volatile__ ("ptesync" : : : "memory");
88
89 return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
90}
91
92static long native_hpte_remove(unsigned long hpte_group)
93{
94 hpte_t *hptep;
95 int i;
96 int slot_offset;
97 unsigned long hpte_v;
98
99 /* pick a random entry to start at */
100 slot_offset = mftb() & 0x7;
101
102 for (i = 0; i < HPTES_PER_GROUP; i++) {
103 hptep = htab_address + hpte_group + slot_offset;
104 hpte_v = hptep->v;
105
106 if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
107 /* retry with lock held */
108 native_lock_hpte(hptep);
109 hpte_v = hptep->v;
110 if ((hpte_v & HPTE_V_VALID)
111 && !(hpte_v & HPTE_V_BOLTED))
112 break;
113 native_unlock_hpte(hptep);
114 }
115
116 slot_offset++;
117 slot_offset &= 0x7;
118 }
119
120 if (i == HPTES_PER_GROUP)
121 return -1;
122
123 /* Invalidate the hpte. NOTE: this also unlocks it */
124 hptep->v = 0;
125
126 return i;
127}
128
129static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
130{
131 unsigned long old;
132 unsigned long *p = &addr->r;
133
134 __asm__ __volatile__(
135 "1: ldarx %0,0,%3\n\
136 rldimi %0,%2,0,61\n\
137 stdcx. %0,0,%3\n\
138 bne 1b"
139 : "=&r" (old), "=m" (*p)
140 : "r" (pp), "r" (p), "m" (*p)
141 : "cc");
142}
143
144/*
145 * Only works on small pages. Yes, it's ugly to have to check each slot in
146 * the group but we only use this during bootup.
147 */
148static long native_hpte_find(unsigned long vpn)
149{
150 hpte_t *hptep;
151 unsigned long hash;
152 unsigned long i, j;
153 long slot;
154 unsigned long hpte_v;
155
156 hash = hpt_hash(vpn, 0);
157
158 for (j = 0; j < 2; j++) {
159 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
160 for (i = 0; i < HPTES_PER_GROUP; i++) {
161 hptep = htab_address + slot;
162 hpte_v = hptep->v;
163
164 if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
165 && (hpte_v & HPTE_V_VALID)
166 && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
167 /* HPTE matches */
168 if (j)
169 slot = -slot;
170 return slot;
171 }
172 ++slot;
173 }
174 hash = ~hash;
175 }
176
177 return -1;
178}
179
180static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
181 unsigned long va, int large, int local)
182{
183 hpte_t *hptep = htab_address + slot;
184 unsigned long hpte_v;
185 unsigned long avpn = va >> 23;
186 int ret = 0;
187
188 if (large)
189 avpn &= ~1;
190
191 native_lock_hpte(hptep);
192
193 hpte_v = hptep->v;
194
195 /* Even if we miss, we need to invalidate the TLB */
196 if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
197 || !(hpte_v & HPTE_V_VALID)) {
198 native_unlock_hpte(hptep);
199 ret = -1;
200 } else {
201 set_pp_bit(newpp, hptep);
202 native_unlock_hpte(hptep);
203 }
204
205 /* Ensure it is out of the tlb too */
206 if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
207 tlbiel(va);
208 } else {
209 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
210
211 if (lock_tlbie)
212 spin_lock(&native_tlbie_lock);
213 tlbie(va, large);
214 if (lock_tlbie)
215 spin_unlock(&native_tlbie_lock);
216 }
217
218 return ret;
219}
220
221/*
222 * Update the page protection bits. Intended to be used to create
223 * guard pages for kernel data structures on pages which are bolted
224 * in the HPT. Assumes pages being operated on will not be stolen.
225 * Does not work on large pages.
226 *
227 * No need to lock here because we should be the only user.
228 */
229static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
230{
231 unsigned long vsid, va, vpn, flags = 0;
232 long slot;
233 hpte_t *hptep;
234 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
235
236 vsid = get_kernel_vsid(ea);
237 va = (vsid << 28) | (ea & 0x0fffffff);
238 vpn = va >> PAGE_SHIFT;
239
240 slot = native_hpte_find(vpn);
241 if (slot == -1)
242 panic("could not find page to bolt\n");
243 hptep = htab_address + slot;
244
245 set_pp_bit(newpp, hptep);
246
247 /* Ensure it is out of the tlb too */
248 if (lock_tlbie)
249 spin_lock_irqsave(&native_tlbie_lock, flags);
250 tlbie(va, 0);
251 if (lock_tlbie)
252 spin_unlock_irqrestore(&native_tlbie_lock, flags);
253}
254
255static void native_hpte_invalidate(unsigned long slot, unsigned long va,
256 int large, int local)
257{
258 hpte_t *hptep = htab_address + slot;
259 unsigned long hpte_v;
260 unsigned long avpn = va >> 23;
261 unsigned long flags;
262 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
263
264 if (large)
265 avpn &= ~1;
266
267 local_irq_save(flags);
268 native_lock_hpte(hptep);
269
270 hpte_v = hptep->v;
271
272 /* Even if we miss, we need to invalidate the TLB */
273 if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
274 || !(hpte_v & HPTE_V_VALID)) {
275 native_unlock_hpte(hptep);
276 } else {
277 /* Invalidate the hpte. NOTE: this also unlocks it */
278 hptep->v = 0;
279 }
280
281 /* Invalidate the tlb */
282 if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
283 tlbiel(va);
284 } else {
285 if (lock_tlbie)
286 spin_lock(&native_tlbie_lock);
287 tlbie(va, large);
288 if (lock_tlbie)
289 spin_unlock(&native_tlbie_lock);
290 }
291 local_irq_restore(flags);
292}
293
294/*
295 * clear all mappings on kexec. All cpus are in real mode (or they will
296 * be when they isi), and we are the only one left. We rely on our kernel
297 * mapping being 0xC0's and the hardware ignoring those two real bits.
298 *
299 * TODO: add batching support when enabled. Remember, no dynamic memory here,
300 * although there is the control page available...
301 */
302static void native_hpte_clear(void)
303{
304 unsigned long slot, slots, flags;
305 hpte_t *hptep = htab_address;
306 unsigned long hpte_v;
307 unsigned long pteg_count;
308
309 pteg_count = htab_hash_mask + 1;
310
311 local_irq_save(flags);
312
313 /* we take the tlbie lock and hold it. Some hardware will
314 * deadlock if we try to tlbie from two processors at once.
315 */
316 spin_lock(&native_tlbie_lock);
317
318 slots = pteg_count * HPTES_PER_GROUP;
319
320 for (slot = 0; slot < slots; slot++, hptep++) {
321 /*
322 * we could lock the pte here, but we are the only cpu
323 * running, right? and for crash dump, we probably
324 * don't want to wait for a maybe bad cpu.
325 */
326 hpte_v = hptep->v;
327
328 if (hpte_v & HPTE_V_VALID) {
329 hptep->v = 0;
330 tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
331 }
332 }
333
334 spin_unlock(&native_tlbie_lock);
335 local_irq_restore(flags);
336}
337
338static void native_flush_hash_range(unsigned long context,
339 unsigned long number, int local)
340{
341 unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn;
342 int i, j;
343 hpte_t *hptep;
344 unsigned long hpte_v;
345 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
346 unsigned long large = batch->large;
347
348 local_irq_save(flags);
349
350 j = 0;
351 for (i = 0; i < number; i++) {
352 if (batch->addr[i] < KERNELBASE)
353 vsid = get_vsid(context, batch->addr[i]);
354 else
355 vsid = get_kernel_vsid(batch->addr[i]);
356
357 va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
358 batch->vaddr[j] = va;
359 if (large)
360 vpn = va >> HPAGE_SHIFT;
361 else
362 vpn = va >> PAGE_SHIFT;
363 hash = hpt_hash(vpn, large);
364 secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
365 if (secondary)
366 hash = ~hash;
367 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
368 slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
369
370 hptep = htab_address + slot;
371
372 avpn = va >> 23;
373 if (large)
374 avpn &= ~0x1UL;
375
376 native_lock_hpte(hptep);
377
378 hpte_v = hptep->v;
379
380 /* Even if we miss, we need to invalidate the TLB */
381 if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
382 || !(hpte_v & HPTE_V_VALID)) {
383 native_unlock_hpte(hptep);
384 } else {
385 /* Invalidate the hpte. NOTE: this also unlocks it */
386 hptep->v = 0;
387 }
388
389 j++;
390 }
391
392 if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
393 asm volatile("ptesync":::"memory");
394
395 for (i = 0; i < j; i++)
396 __tlbiel(batch->vaddr[i]);
397
398 asm volatile("ptesync":::"memory");
399 } else {
400 int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
401
402 if (lock_tlbie)
403 spin_lock(&native_tlbie_lock);
404
405 asm volatile("ptesync":::"memory");
406
407 for (i = 0; i < j; i++)
408 __tlbie(batch->vaddr[i], large);
409
410 asm volatile("eieio; tlbsync; ptesync":::"memory");
411
412 if (lock_tlbie)
413 spin_unlock(&native_tlbie_lock);
414 }
415
416 local_irq_restore(flags);
417}
418
419#ifdef CONFIG_PPC_PSERIES
420/* Disable TLB batching on nighthawk */
421static inline int tlb_batching_enabled(void)
422{
423 struct device_node *root = of_find_node_by_path("/");
424 int enabled = 1;
425
426 if (root) {
427 const char *model = get_property(root, "model", NULL);
428 if (model && !strcmp(model, "IBM,9076-N81"))
429 enabled = 0;
430 of_node_put(root);
431 }
432
433 return enabled;
434}
435#else
436static inline int tlb_batching_enabled(void)
437{
438 return 1;
439}
440#endif
441
442void hpte_init_native(void)
443{
444 ppc_md.hpte_invalidate = native_hpte_invalidate;
445 ppc_md.hpte_updatepp = native_hpte_updatepp;
446 ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
447 ppc_md.hpte_insert = native_hpte_insert;
448 ppc_md.hpte_remove = native_hpte_remove;
449 ppc_md.hpte_clear_all = native_hpte_clear;
450 if (tlb_batching_enabled())
451 ppc_md.flush_hash_range = native_flush_hash_range;
452 htab_finish_init();
453}
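
native_hpte_find() probes the primary PTEG, then complements the hash (hash = ~hash) and probes the secondary PTEG; the insertion path in hash_low.S derives the same secondary index with andc r0,r27,r28. The two group offsets, worked through with an assumed mask value for illustration:

    #include <stdio.h>

    #define HPTES_PER_GROUP 8

    int main(void)
    {
            unsigned long hash = 0x123456789abcUL;  /* arbitrary example hash */
            unsigned long mask = 0xffff;            /* htab_hash_mask, assumed */

            /* Primary and secondary PTEG slots, as in native_hpte_find(). */
            unsigned long primary   = (hash & mask) * HPTES_PER_GROUP;
            unsigned long secondary = (~hash & mask) * HPTES_PER_GROUP;

            printf("primary group @ %#lx, secondary group @ %#lx\n",
                   primary, secondary);
            return 0;
    }
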
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
deleted file mode 100644
index 09475c8edf7c..000000000000
--- a/arch/ppc64/mm/hash_utils.c
+++ /dev/null
@@ -1,438 +0,0 @@
1/*
2 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
3 * {mikejc|engebret}@us.ibm.com
4 *
5 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
6 *
7 * SMP scalability work:
8 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
9 *
10 * Module name: htab.c
11 *
12 * Description:
13 * PowerPC Hashed Page Table functions
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#undef DEBUG
22
23#include <linux/config.h>
24#include <linux/spinlock.h>
25#include <linux/errno.h>
26#include <linux/sched.h>
27#include <linux/proc_fs.h>
28#include <linux/stat.h>
29#include <linux/sysctl.h>
30#include <linux/ctype.h>
31#include <linux/cache.h>
32#include <linux/init.h>
33#include <linux/signal.h>
34
35#include <asm/ppcdebug.h>
36#include <asm/processor.h>
37#include <asm/pgtable.h>
38#include <asm/mmu.h>
39#include <asm/mmu_context.h>
40#include <asm/page.h>
41#include <asm/types.h>
42#include <asm/system.h>
43#include <asm/uaccess.h>
44#include <asm/machdep.h>
45#include <asm/lmb.h>
46#include <asm/abs_addr.h>
47#include <asm/tlbflush.h>
48#include <asm/io.h>
49#include <asm/eeh.h>
50#include <asm/tlb.h>
51#include <asm/cacheflush.h>
52#include <asm/cputable.h>
53#include <asm/abs_addr.h>
54#include <asm/sections.h>
55
56#ifdef DEBUG
57#define DBG(fmt...) udbg_printf(fmt)
58#else
59#define DBG(fmt...)
60#endif
61
62/*
63 * Note: pte --> Linux PTE
64 * HPTE --> PowerPC Hashed Page Table Entry
65 *
66 * Execution context:
67 * htab_initialize is called with the MMU off (of course), but
68 * the kernel has been copied down to zero so it can directly
69 * reference global data. At this point it is very difficult
70 * to print debug info.
71 *
72 */
73
74#ifdef CONFIG_U3_DART
75extern unsigned long dart_tablebase;
76#endif /* CONFIG_U3_DART */
77
78hpte_t *htab_address;
79unsigned long htab_hash_mask;
80
81extern unsigned long _SDR1;
82
83#define KB (1024)
84#define MB (1024*KB)
85
86static inline void loop_forever(void)
87{
88 volatile unsigned long x = 1;
89 for(;x;x|=1)
90 ;
91}
92
93#ifdef CONFIG_PPC_MULTIPLATFORM
94static inline void create_pte_mapping(unsigned long start, unsigned long end,
95 unsigned long mode, int large)
96{
97 unsigned long addr;
98 unsigned int step;
99 unsigned long tmp_mode;
100 unsigned long vflags;
101
102 if (large) {
103 step = 16*MB;
104 vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
105 } else {
106 step = 4*KB;
107 vflags = HPTE_V_BOLTED;
108 }
109
110 for (addr = start; addr < end; addr += step) {
111 unsigned long vpn, hash, hpteg;
112 unsigned long vsid = get_kernel_vsid(addr);
113 unsigned long va = (vsid << 28) | (addr & 0xfffffff);
114 int ret;
115
116 if (large)
117 vpn = va >> HPAGE_SHIFT;
118 else
119 vpn = va >> PAGE_SHIFT;
120
121
122 tmp_mode = mode;
123
124 /* Make non-kernel text non-executable */
125 if (!in_kernel_text(addr))
126 tmp_mode = mode | HW_NO_EXEC;
127
128 hash = hpt_hash(vpn, large);
129
130 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
131
132#ifdef CONFIG_PPC_PSERIES
133 if (systemcfg->platform & PLATFORM_LPAR)
134 ret = pSeries_lpar_hpte_insert(hpteg, va,
135 virt_to_abs(addr) >> PAGE_SHIFT,
136 vflags, tmp_mode);
137 else
138#endif /* CONFIG_PPC_PSERIES */
139 ret = native_hpte_insert(hpteg, va,
140 virt_to_abs(addr) >> PAGE_SHIFT,
141 vflags, tmp_mode);
142
143 if (ret == -1) {
144 ppc64_terminate_msg(0x20, "create_pte_mapping");
145 loop_forever();
146 }
147 }
148}
149
150void __init htab_initialize(void)
151{
152 unsigned long table, htab_size_bytes;
153 unsigned long pteg_count;
154 unsigned long mode_rw;
155 int i, use_largepages = 0;
156 unsigned long base = 0, size = 0;
157 extern unsigned long tce_alloc_start, tce_alloc_end;
158
159 DBG(" -> htab_initialize()\n");
160
161 /*
162 * Calculate the required size of the htab. We want the number of
163 * PTEGs to equal one half the number of real pages.
164 */
165 htab_size_bytes = 1UL << ppc64_pft_size;
166 pteg_count = htab_size_bytes >> 7;
167
168 /* For debug, make the HTAB 1/8 as big as it normally would be. */
169 ifppcdebug(PPCDBG_HTABSIZE) {
170 pteg_count >>= 3;
171 htab_size_bytes = pteg_count << 7;
172 }
173
174 htab_hash_mask = pteg_count - 1;
175
176 if (systemcfg->platform & PLATFORM_LPAR) {
177 /* Using a hypervisor which owns the htab */
178 htab_address = NULL;
179 _SDR1 = 0;
180 } else {
181 /* Find storage for the HPT. Must be contiguous in
182 * the absolute address space.
183 */
184 table = lmb_alloc(htab_size_bytes, htab_size_bytes);
185
186 DBG("Hash table allocated at %lx, size: %lx\n", table,
187 htab_size_bytes);
188
189 if ( !table ) {
190 ppc64_terminate_msg(0x20, "hpt space");
191 loop_forever();
192 }
193 htab_address = abs_to_virt(table);
194
195 /* htab absolute addr + encoded htabsize */
196 _SDR1 = table + __ilog2(pteg_count) - 11;
197
198 /* Initialize the HPT with no entries */
199 memset((void *)table, 0, htab_size_bytes);
200 }
201
202 mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
203
204 /* On U3 based machines, we need to reserve the DART area and
205 * _NOT_ map it to avoid cache paradoxes as it's remapped non
206 * cacheable later on
207 */
208 if (cpu_has_feature(CPU_FTR_16M_PAGE))
209 use_largepages = 1;
210
211 /* create the bolted linear mapping in the hash table */
212 for (i=0; i < lmb.memory.cnt; i++) {
213 base = lmb.memory.region[i].base + KERNELBASE;
214 size = lmb.memory.region[i].size;
215
216 DBG("creating mapping for region: %lx : %lx\n", base, size);
217
218#ifdef CONFIG_U3_DART
219 /* Do not map the DART space. Fortunately, it will be aligned
220 * in such a way that it will not cross two lmb regions and will
221 * fit within a single 16MB page.
222 * The DART space is assumed to be a full 16MB region even if we
223 * only use 2MB of that space. We will use more of it later for
224 * AGP GART. We have to use a full 16MB large page.
225 */
226 DBG("DART base: %lx\n", dart_tablebase);
227
228 if (dart_tablebase != 0 && dart_tablebase >= base
229 && dart_tablebase < (base + size)) {
230 if (base != dart_tablebase)
231 create_pte_mapping(base, dart_tablebase, mode_rw,
232 use_largepages);
233 if ((base + size) > (dart_tablebase + 16*MB))
234 create_pte_mapping(dart_tablebase + 16*MB, base + size,
235 mode_rw, use_largepages);
236 continue;
237 }
238#endif /* CONFIG_U3_DART */
239 create_pte_mapping(base, base + size, mode_rw, use_largepages);
240 }
241
242 /*
243 * If we have a memory_limit and we've allocated TCEs then we need to
244 * explicitly map the TCE area at the top of RAM. We also cope with the
245 * case that the TCEs start below memory_limit.
246 * tce_alloc_start/end are 16MB aligned so the mapping should work
247 * for either 4K or 16MB pages.
248 */
249 if (tce_alloc_start) {
250 tce_alloc_start += KERNELBASE;
251 tce_alloc_end += KERNELBASE;
252
253 if (base + size >= tce_alloc_start)
254 tce_alloc_start = base + size + 1;
255
256 create_pte_mapping(tce_alloc_start, tce_alloc_end,
257 mode_rw, use_largepages);
258 }
259
260 DBG(" <- htab_initialize()\n");
261}
262#undef KB
263#undef MB
264#endif /* CONFIG_PPC_MULTIPLATFORM */
265
266/*
267 * Called by asm hashtable.S for doing lazy icache flush
268 */
269unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
270{
271 struct page *page;
272
273 if (!pfn_valid(pte_pfn(pte)))
274 return pp;
275
276 page = pte_page(pte);
277
278 /* page is dirty */
279 if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
280 if (trap == 0x400) {
281 __flush_dcache_icache(page_address(page));
282 set_bit(PG_arch_1, &page->flags);
283 } else
284 pp |= HW_NO_EXEC;
285 }
286 return pp;
287}
288
289/* Result code is:
290 * 0 - handled
291 * 1 - normal page fault
292 * -1 - critical hash insertion error
293 */
294int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
295{
296 void *pgdir;
297 unsigned long vsid;
298 struct mm_struct *mm;
299 pte_t *ptep;
300 int ret;
301 int user_region = 0;
302 int local = 0;
303 cpumask_t tmp;
304
305 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
306 return 1;
307
308 switch (REGION_ID(ea)) {
309 case USER_REGION_ID:
310 user_region = 1;
311 mm = current->mm;
312 if (! mm)
313 return 1;
314
315 vsid = get_vsid(mm->context.id, ea);
316 break;
317 case VMALLOC_REGION_ID:
318 mm = &init_mm;
319 vsid = get_kernel_vsid(ea);
320 break;
321#if 0
322 case KERNEL_REGION_ID:
323 /*
324 * Should never get here - entire 0xC0... region is bolted.
325 * Send the problem up to do_page_fault
326 */
327#endif
328 default:
329 /* Not a valid range
330 * Send the problem up to do_page_fault
331 */
332 return 1;
333 break;
334 }
335
336 pgdir = mm->pgd;
337
338 if (pgdir == NULL)
339 return 1;
340
341 tmp = cpumask_of_cpu(smp_processor_id());
342 if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
343 local = 1;
344
345 /* Is this a huge page ? */
346 if (unlikely(in_hugepage_area(mm->context, ea)))
347 ret = hash_huge_page(mm, access, ea, vsid, local);
348 else {
349 ptep = find_linux_pte(pgdir, ea);
350 if (ptep == NULL)
351 return 1;
352 ret = __hash_page(ea, access, vsid, ptep, trap, local);
353 }
354
355 return ret;
356}
357
358void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
359 int local)
360{
361 unsigned long vsid, vpn, va, hash, secondary, slot;
362 unsigned long huge = pte_huge(pte);
363
364 if (ea < KERNELBASE)
365 vsid = get_vsid(context, ea);
366 else
367 vsid = get_kernel_vsid(ea);
368
369 va = (vsid << 28) | (ea & 0x0fffffff);
370 if (huge)
371 vpn = va >> HPAGE_SHIFT;
372 else
373 vpn = va >> PAGE_SHIFT;
374 hash = hpt_hash(vpn, huge);
375 secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
376 if (secondary)
377 hash = ~hash;
378 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
379 slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
380
381 ppc_md.hpte_invalidate(slot, va, huge, local);
382}
383
384void flush_hash_range(unsigned long context, unsigned long number, int local)
385{
386 if (ppc_md.flush_hash_range) {
387 ppc_md.flush_hash_range(context, number, local);
388 } else {
389 int i;
390 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
391
392 for (i = 0; i < number; i++)
393 flush_hash_page(context, batch->addr[i], batch->pte[i],
394 local);
395 }
396}
397
398static inline void make_bl(unsigned int *insn_addr, void *func)
399{
400 unsigned long funcp = *((unsigned long *)func);
401 int offset = funcp - (unsigned long)insn_addr;
402
403 *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
404 flush_icache_range((unsigned long)insn_addr,
405 (unsigned long)insn_addr + 4);
406}
407
408/*
409 * low_hash_fault is called when the low-level hash code failed
410 * to insert a PTE due to a hypervisor error
411 */
412void low_hash_fault(struct pt_regs *regs, unsigned long address)
413{
414 if (user_mode(regs)) {
415 siginfo_t info;
416
417 info.si_signo = SIGBUS;
418 info.si_errno = 0;
419 info.si_code = BUS_ADRERR;
420 info.si_addr = (void __user *)address;
421 force_sig_info(SIGBUS, &info, current);
422 return;
423 }
424 bad_page_fault(regs, address, SIGBUS);
425}
426
427void __init htab_finish_init(void)
428{
429 extern unsigned int *htab_call_hpte_insert1;
430 extern unsigned int *htab_call_hpte_insert2;
431 extern unsigned int *htab_call_hpte_remove;
432 extern unsigned int *htab_call_hpte_updatepp;
433
434 make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
435 make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
436 make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
437 make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
438}
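
make_bl() above patches an absolute call site into a relative bl: opcode 18 with the LK bit gives the base word 0x48000001, and the signed displacement is masked into bits 2..25. A standalone check of that encoding; it assumes the target lies within the +/-32MB reach of bl, which the call sites above rely on:

    #include <assert.h>
    #include <stdint.h>

    /* Encode a PowerPC 'bl' from insn address to target, exactly as
     * make_bl() above builds it: 0x48000001 | (offset & 0x03fffffc). */
    static uint32_t encode_bl(uint64_t insn_addr, uint64_t target)
    {
            int64_t offset = (int64_t)(target - insn_addr);

            return (uint32_t)(0x48000001u | (offset & 0x03fffffc));
    }

    int main(void)
    {
            /* bl to the next instruction: displacement 4, LK bit set */
            assert(encode_bl(0x1000, 0x1004) == 0x48000005);
            /* a backwards branch keeps the sign in the masked field */
            assert(encode_bl(0x1000, 0x0ff0) == 0x4bfffff1);
            return 0;
    }
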
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
deleted file mode 100644
index 0ea0994ed974..000000000000
--- a/arch/ppc64/mm/hugetlbpage.c
+++ /dev/null
@@ -1,745 +0,0 @@
1/*
2 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
3 *
4 * Copyright (C) 2003 David Gibson, IBM Corporation.
5 *
6 * Based on the IA-32 version:
7 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
8 */
9
10#include <linux/init.h>
11#include <linux/fs.h>
12#include <linux/mm.h>
13#include <linux/hugetlb.h>
14#include <linux/pagemap.h>
15#include <linux/smp_lock.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/sysctl.h>
19#include <asm/mman.h>
20#include <asm/pgalloc.h>
21#include <asm/tlb.h>
22#include <asm/tlbflush.h>
23#include <asm/mmu_context.h>
24#include <asm/machdep.h>
25#include <asm/cputable.h>
26#include <asm/tlb.h>
27
28#include <linux/sysctl.h>
29
30#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
31#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
32
33/* Modelled after find_linux_pte() */
34pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
35{
36 pgd_t *pg;
37 pud_t *pu;
38 pmd_t *pm;
39 pte_t *pt;
40
41 BUG_ON(! in_hugepage_area(mm->context, addr));
42
43 addr &= HPAGE_MASK;
44
45 pg = pgd_offset(mm, addr);
46 if (!pgd_none(*pg)) {
47 pu = pud_offset(pg, addr);
48 if (!pud_none(*pu)) {
49 pm = pmd_offset(pu, addr);
50 pt = (pte_t *)pm;
51 BUG_ON(!pmd_none(*pm)
52 && !(pte_present(*pt) && pte_huge(*pt)));
53 return pt;
54 }
55 }
56
57 return NULL;
58}
59
60pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
61{
62 pgd_t *pg;
63 pud_t *pu;
64 pmd_t *pm;
65 pte_t *pt;
66
67 BUG_ON(! in_hugepage_area(mm->context, addr));
68
69 addr &= HPAGE_MASK;
70
71 pg = pgd_offset(mm, addr);
72 pu = pud_alloc(mm, pg, addr);
73
74 if (pu) {
75 pm = pmd_alloc(mm, pu, addr);
76 if (pm) {
77 pt = (pte_t *)pm;
78 BUG_ON(!pmd_none(*pm)
79 && !(pte_present(*pt) && pte_huge(*pt)));
80 return pt;
81 }
82 }
83
84 return NULL;
85}
86
87#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)
88
89void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
90 pte_t *ptep, pte_t pte)
91{
92 int i;
93
94 if (pte_present(*ptep)) {
95 pte_clear(mm, addr, ptep);
96 flush_tlb_pending();
97 }
98
99 for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
100 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
101 ptep++;
102 }
103}
104
105pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
106 pte_t *ptep)
107{
108 unsigned long old = pte_update(ptep, ~0UL);
109 int i;
110
111 if (old & _PAGE_HASHPTE)
112 hpte_update(mm, addr, old, 0);
113
114 for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
115 ptep[i] = __pte(0);
116
117 return __pte(old);
118}
119
120/*
121 * This function checks for proper alignment of input addr and len parameters.
122 */
123int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
124{
125 if (len & ~HPAGE_MASK)
126 return -EINVAL;
127 if (addr & ~HPAGE_MASK)
128 return -EINVAL;
129 if (! (within_hugepage_low_range(addr, len)
130 || within_hugepage_high_range(addr, len)) )
131 return -EINVAL;
132 return 0;
133}
134
135static void flush_low_segments(void *parm)
136{
137 u16 areas = (unsigned long) parm;
138 unsigned long i;
139
140 asm volatile("isync" : : : "memory");
141
142 BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
143
144 for (i = 0; i < NUM_LOW_AREAS; i++) {
145 if (! (areas & (1U << i)))
146 continue;
147 asm volatile("slbie %0"
148 : : "r" ((i << SID_SHIFT) | SLBIE_C));
149 }
150
151 asm volatile("isync" : : : "memory");
152}
153
154static void flush_high_segments(void *parm)
155{
156 u16 areas = (unsigned long) parm;
157 unsigned long i, j;
158
159 asm volatile("isync" : : : "memory");
160
161 BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
162
163 for (i = 0; i < NUM_HIGH_AREAS; i++) {
164 if (! (areas & (1U << i)))
165 continue;
166 for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
167 asm volatile("slbie %0"
168 :: "r" (((i << HTLB_AREA_SHIFT)
169 + (j << SID_SHIFT)) | SLBIE_C));
170 }
171
172 asm volatile("isync" : : : "memory");
173}
174
175static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
176{
177 unsigned long start = area << SID_SHIFT;
178 unsigned long end = (area+1) << SID_SHIFT;
179 struct vm_area_struct *vma;
180
181 BUG_ON(area >= NUM_LOW_AREAS);
182
183 /* Check no VMAs are in the region */
184 vma = find_vma(mm, start);
185 if (vma && (vma->vm_start < end))
186 return -EBUSY;
187
188 return 0;
189}
190
191static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
192{
193 unsigned long start = area << HTLB_AREA_SHIFT;
194 unsigned long end = (area+1) << HTLB_AREA_SHIFT;
195 struct vm_area_struct *vma;
196
197 BUG_ON(area >= NUM_HIGH_AREAS);
198
199 /* Check no VMAs are in the region */
200 vma = find_vma(mm, start);
201 if (vma && (vma->vm_start < end))
202 return -EBUSY;
203
204 return 0;
205}
206
207static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
208{
209 unsigned long i;
210
211 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
212 BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
213
214 newareas &= ~(mm->context.low_htlb_areas);
215 if (! newareas)
216 return 0; /* The segments we want are already open */
217
218 for (i = 0; i < NUM_LOW_AREAS; i++)
219 if ((1 << i) & newareas)
220 if (prepare_low_area_for_htlb(mm, i) != 0)
221 return -EBUSY;
222
223 mm->context.low_htlb_areas |= newareas;
224
225 /* update the paca copy of the context struct */
226 get_paca()->context = mm->context;
227
228 /* the context change must make it to memory before the flush,
229 * so that further SLB misses do the right thing. */
230 mb();
231 on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
232
233 return 0;
234}
235
236static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
237{
238 unsigned long i;
239
240 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
241 BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
242 != NUM_HIGH_AREAS);
243
244 newareas &= ~(mm->context.high_htlb_areas);
245 if (! newareas)
246 return 0; /* The areas we want are already open */
247
248 for (i = 0; i < NUM_HIGH_AREAS; i++)
249 if ((1 << i) & newareas)
250 if (prepare_high_area_for_htlb(mm, i) != 0)
251 return -EBUSY;
252
253 mm->context.high_htlb_areas |= newareas;
254
255 /* update the paca copy of the context struct */
256 get_paca()->context = mm->context;
257
258 /* the context change must make it to memory before the flush,
259 * so that further SLB misses do the right thing. */
260 mb();
261 on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
262
263 return 0;
264}
265
266int prepare_hugepage_range(unsigned long addr, unsigned long len)
267{
268 int err;
269
270 if ( (addr+len) < addr )
271 return -EINVAL;
272
273 if ((addr + len) < 0x100000000UL)
274 err = open_low_hpage_areas(current->mm,
275 LOW_ESID_MASK(addr, len));
276 else
277 err = open_high_hpage_areas(current->mm,
278 HTLB_AREA_MASK(addr, len));
279 if (err) {
280 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
281 " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
282 addr, len,
283 LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
284 return err;
285 }
286
287 return 0;
288}
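The 4GB boundary decides which set of areas gets opened; a sketch with hypothetical arguments:

/*
 * prepare_hugepage_range(0x40000000UL, 0x10000000UL):
 *     addr + len < 2^32, so open_low_hpage_areas() opens the 256MB
 *     segments selected by LOW_ESID_MASK(addr, len).
 * prepare_hugepage_range(0x100000000UL, 0x10000000UL):
 *     addr + len >= 2^32, so open_high_hpage_areas() opens the areas
 *     selected by HTLB_AREA_MASK(addr, len) instead.
 */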
289
290struct page *
291follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
292{
293 pte_t *ptep;
294 struct page *page;
295
296 if (! in_hugepage_area(mm->context, address))
297 return ERR_PTR(-EINVAL);
298
299 ptep = huge_pte_offset(mm, address);
300 page = pte_page(*ptep);
301 if (page)
302 page += (address % HPAGE_SIZE) / PAGE_SIZE;
303
304 return page;
305}
306
307int pmd_huge(pmd_t pmd)
308{
309 return 0;
310}
311
312struct page *
313follow_huge_pmd(struct mm_struct *mm, unsigned long address,
314 pmd_t *pmd, int write)
315{
316 BUG();
317 return NULL;
318}
319
320/* Because we have an exclusive hugepage region which lies within the
321 * normal user address space, we have to take special measures to make
322 * non-huge mmap()s evade the hugepage reserved regions. */
323unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
324 unsigned long len, unsigned long pgoff,
325 unsigned long flags)
326{
327 struct mm_struct *mm = current->mm;
328 struct vm_area_struct *vma;
329 unsigned long start_addr;
330
331 if (len > TASK_SIZE)
332 return -ENOMEM;
333
334 if (addr) {
335 addr = PAGE_ALIGN(addr);
336 vma = find_vma(mm, addr);
337 if (((TASK_SIZE - len) >= addr)
338 && (!vma || (addr+len) <= vma->vm_start)
339 && !is_hugepage_only_range(mm, addr,len))
340 return addr;
341 }
342 if (len > mm->cached_hole_size) {
343 start_addr = addr = mm->free_area_cache;
344 } else {
345 start_addr = addr = TASK_UNMAPPED_BASE;
346 mm->cached_hole_size = 0;
347 }
348
349full_search:
350 vma = find_vma(mm, addr);
351 while (TASK_SIZE - len >= addr) {
352 BUG_ON(vma && (addr >= vma->vm_end));
353
354 if (touches_hugepage_low_range(mm, addr, len)) {
355 addr = ALIGN(addr+1, 1<<SID_SHIFT);
356 vma = find_vma(mm, addr);
357 continue;
358 }
359 if (touches_hugepage_high_range(mm, addr, len)) {
360 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
361 vma = find_vma(mm, addr);
362 continue;
363 }
364 if (!vma || addr + len <= vma->vm_start) {
365 /*
366 * Remember the place where we stopped the search:
367 */
368 mm->free_area_cache = addr + len;
369 return addr;
370 }
371 if (addr + mm->cached_hole_size < vma->vm_start)
372 mm->cached_hole_size = vma->vm_start - addr;
373 addr = vma->vm_end;
374 vma = vma->vm_next;
375 }
376
377 /* Make sure we didn't miss any holes */
378 if (start_addr != TASK_UNMAPPED_BASE) {
379 start_addr = addr = TASK_UNMAPPED_BASE;
380 mm->cached_hole_size = 0;
381 goto full_search;
382 }
383 return -ENOMEM;
384}
385
386/*
387 * This mmap-allocator allocates new areas top-down from below the
388 * stack's low limit (the base):
389 *
390 * Because we have an exclusive hugepage region which lies within the
391 * normal user address space, we have to take special measures to make
392 * non-huge mmap()s evade the hugepage reserved regions.
393 */
394unsigned long
395arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
396 const unsigned long len, const unsigned long pgoff,
397 const unsigned long flags)
398{
399 struct vm_area_struct *vma, *prev_vma;
400 struct mm_struct *mm = current->mm;
401 unsigned long base = mm->mmap_base, addr = addr0;
402 unsigned long largest_hole = mm->cached_hole_size;
403 int first_time = 1;
404
405 /* requested length too big for entire address space */
406 if (len > TASK_SIZE)
407 return -ENOMEM;
408
409 /* don't allow allocations above current base */
410 if (mm->free_area_cache > base)
411 mm->free_area_cache = base;
412
413 /* requesting a specific address */
414 if (addr) {
415 addr = PAGE_ALIGN(addr);
416 vma = find_vma(mm, addr);
417 if (TASK_SIZE - len >= addr &&
418 (!vma || addr + len <= vma->vm_start)
419 && !is_hugepage_only_range(mm, addr,len))
420 return addr;
421 }
422
423 if (len <= largest_hole) {
424 largest_hole = 0;
425 mm->free_area_cache = base;
426 }
427try_again:
428 /* make sure it can fit in the remaining address space */
429 if (mm->free_area_cache < len)
430 goto fail;
431
432 /* either no address requested or can't fit in requested address hole */
433 addr = (mm->free_area_cache - len) & PAGE_MASK;
434 do {
435hugepage_recheck:
436 if (touches_hugepage_low_range(mm, addr, len)) {
437 addr = (addr & ((~0) << SID_SHIFT)) - len;
438 goto hugepage_recheck;
439 } else if (touches_hugepage_high_range(mm, addr, len)) {
440 addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
441 goto hugepage_recheck;
442 }
443
444 /*
445 * Lookup failure means no vma is above this address,
446 * i.e. return with success:
447 */
448 if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
449 return addr;
450
451 /*
452 * new region fits between prev_vma->vm_end and
453 * vma->vm_start, use it:
454 */
455 if (addr+len <= vma->vm_start &&
456 (!prev_vma || (addr >= prev_vma->vm_end))) {
457 /* remember the address as a hint for next time */
458 mm->cached_hole_size = largest_hole;
459 return (mm->free_area_cache = addr);
460 } else {
461 /* pull free_area_cache down to the first hole */
462 if (mm->free_area_cache == vma->vm_end) {
463 mm->free_area_cache = vma->vm_start;
464 mm->cached_hole_size = largest_hole;
465 }
466 }
467
468 /* remember the largest hole we saw so far */
469 if (addr + largest_hole < vma->vm_start)
470 largest_hole = vma->vm_start - addr;
471
472 /* try just below the current vma->vm_start */
473 addr = vma->vm_start-len;
474 } while (len <= vma->vm_start);
475
476fail:
477 /*
478 * if hint left us with no space for the requested
479 * mapping then try again:
480 */
481 if (first_time) {
482 mm->free_area_cache = base;
483 largest_hole = 0;
484 first_time = 0;
485 goto try_again;
486 }
487 /*
488 * A failed mmap() very likely causes application failure,
489 * so fall back to the bottom-up function here. This scenario
490 * can happen with large stack limits and large mmap()
491 * allocations.
492 */
493 mm->free_area_cache = TASK_UNMAPPED_BASE;
494 mm->cached_hole_size = ~0UL;
495 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
496 /*
497 * Restore the topdown base:
498 */
499 mm->free_area_cache = base;
500 mm->cached_hole_size = ~0UL;
501
502 return addr;
503}
504
505static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
506{
507 unsigned long addr = 0;
508 struct vm_area_struct *vma;
509
510 vma = find_vma(current->mm, addr);
511 while (addr + len <= 0x100000000UL) {
512 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
513
514 if (! __within_hugepage_low_range(addr, len, segmask)) {
515 addr = ALIGN(addr+1, 1<<SID_SHIFT);
516 vma = find_vma(current->mm, addr);
517 continue;
518 }
519
520 if (!vma || (addr + len) <= vma->vm_start)
521 return addr;
522 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
523 /* Depending on segmask this might not be a confirmed
524 * hugepage region, so the ALIGN could have skipped
525 * some VMAs */
526 vma = find_vma(current->mm, addr);
527 }
528
529 return -ENOMEM;
530}
531
532static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
533{
534 unsigned long addr = 0x100000000UL;
535 struct vm_area_struct *vma;
536
537 vma = find_vma(current->mm, addr);
538 while (addr + len <= TASK_SIZE_USER64) {
539 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
540
541 if (! __within_hugepage_high_range(addr, len, areamask)) {
542 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
543 vma = find_vma(current->mm, addr);
544 continue;
545 }
546
547 if (!vma || (addr + len) <= vma->vm_start)
548 return addr;
549 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
550 /* Depending on areamask this might not be a confirmed
551 * hugepage region, so the ALIGN could have skipped
552 * some VMAs */
553 vma = find_vma(current->mm, addr);
554 }
555
556 return -ENOMEM;
557}
558
559unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
560 unsigned long len, unsigned long pgoff,
561 unsigned long flags)
562{
563 int lastshift;
564 u16 areamask, curareas;
565
566 if (len & ~HPAGE_MASK)
567 return -EINVAL;
568
569 if (!cpu_has_feature(CPU_FTR_16M_PAGE))
570 return -EINVAL;
571
572 if (test_thread_flag(TIF_32BIT)) {
573 curareas = current->mm->context.low_htlb_areas;
574
575 /* First see if we can do the mapping in the existing
576 * low areas */
577 addr = htlb_get_low_area(len, curareas);
578 if (addr != -ENOMEM)
579 return addr;
580
581 lastshift = 0;
582 for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
583 ! lastshift; areamask >>=1) {
584 if (areamask & 1)
585 lastshift = 1;
586
587 addr = htlb_get_low_area(len, curareas | areamask);
588 if ((addr != -ENOMEM)
589 && open_low_hpage_areas(current->mm, areamask) == 0)
590 return addr;
591 }
592 } else {
593 curareas = current->mm->context.high_htlb_areas;
594
595 /* First see if we can do the mapping in the existing
596 * high areas */
597 addr = htlb_get_high_area(len, curareas);
598 if (addr != -ENOMEM)
599 return addr;
600
601 lastshift = 0;
602 for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
603 ! lastshift; areamask >>=1) {
604 if (areamask & 1)
605 lastshift = 1;
606
607 addr = htlb_get_high_area(len, curareas | areamask);
608 if ((addr != -ENOMEM)
609 && open_high_hpage_areas(current->mm, areamask) == 0)
610 return addr;
611 }
612 }
613 printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
614 " enough areas\n");
615 return -ENOMEM;
616}
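A minimal userspace sketch of what eventually lands in hugetlb_get_unmapped_area(): mapping a file on hugetlbfs (the mount point and file name are hypothetical, and the huge page size is assumed to be 16MB):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH (16UL * 1024 * 1024)	/* one assumed 16MB huge page */

int main(void)
{
	/* "/mnt/huge" is a hypothetical hugetlbfs mount point */
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0)
		return 1;
	/* addr == NULL lets the kernel place the mapping via
	 * hugetlb_get_unmapped_area() */
	p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;		/* touch it to fault the huge page in */
	munmap(p, LENGTH);
	close(fd);
	return 0;
}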
617
618int hash_huge_page(struct mm_struct *mm, unsigned long access,
619 unsigned long ea, unsigned long vsid, int local)
620{
621 pte_t *ptep;
622 unsigned long va, vpn;
623 pte_t old_pte, new_pte;
624 unsigned long rflags, prpn;
625 long slot;
626 int err = 1;
627
628 spin_lock(&mm->page_table_lock);
629
630 ptep = huge_pte_offset(mm, ea);
631
632 /* Search the Linux page table for a match with va */
633 va = (vsid << 28) | (ea & 0x0fffffff);
634 vpn = va >> HPAGE_SHIFT;
635
636 /*
637 * If no pte found or not present, send the problem up to
638 * do_page_fault
639 */
640 if (unlikely(!ptep || pte_none(*ptep)))
641 goto out;
642
643/* BUG_ON(pte_bad(*ptep)); */
644
645 /*
646 * Check the user's access rights to the page. If access should be
647 * prevented then send the problem up to do_page_fault.
648 */
649 if (unlikely(access & ~pte_val(*ptep)))
650 goto out;
651 /*
652 * At this point, we have a pte (old_pte) which can be used to build
653 * or update an HPTE. There are 2 cases:
654 *
655 * 1. There is a valid (present) pte with no associated HPTE (this is
656 * the most common case)
657 * 2. There is a valid (present) pte with an associated HPTE. The
658 * current values of the pp bits in the HPTE prevent access
659 * because we are doing software DIRTY bit management and the
660 * page is currently not DIRTY.
661 */
662
663
664 old_pte = *ptep;
665 new_pte = old_pte;
666
667 rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
668 /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
669 rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
670
671 /* Check if pte already has an hpte (case 2) */
672 if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
673 /* There MIGHT be an HPTE for this pte */
674 unsigned long hash, slot;
675
676 hash = hpt_hash(vpn, 1);
677 if (pte_val(old_pte) & _PAGE_SECONDARY)
678 hash = ~hash;
679 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
680 slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
681
682 if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
683 pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
684 }
685
686 if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
687 unsigned long hash = hpt_hash(vpn, 1);
688 unsigned long hpte_group;
689
690 prpn = pte_pfn(old_pte);
691
692repeat:
693 hpte_group = ((hash & htab_hash_mask) *
694 HPTES_PER_GROUP) & ~0x7UL;
695
696 /* Update the linux pte with the HPTE slot */
697 pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
698 pte_val(new_pte) |= _PAGE_HASHPTE;
699
700 /* Add in WIMG bits */
701 /* XXX We should store these in the pte */
702 rflags |= _PAGE_COHERENT;
703
704 slot = ppc_md.hpte_insert(hpte_group, va, prpn,
705 HPTE_V_LARGE, rflags);
706
707 /* Primary is full, try the secondary */
708 if (unlikely(slot == -1)) {
709 pte_val(new_pte) |= _PAGE_SECONDARY;
710 hpte_group = ((~hash & htab_hash_mask) *
711 HPTES_PER_GROUP) & ~0x7UL;
712 slot = ppc_md.hpte_insert(hpte_group, va, prpn,
713 HPTE_V_LARGE |
714 HPTE_V_SECONDARY,
715 rflags);
716 if (slot == -1) {
717 if (mftb() & 0x1)
718 hpte_group = ((hash & htab_hash_mask) *
719 HPTES_PER_GROUP)&~0x7UL;
720
721 ppc_md.hpte_remove(hpte_group);
722 goto repeat;
723 }
724 }
725
726 if (unlikely(slot == -2))
727 panic("hash_huge_page: pte_insert failed\n");
728
729 pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;
730
731 /*
732 * No need to use ldarx/stdcx here because all who
733 * might be updating the pte will hold the
734 * page_table_lock
735 */
736 *ptep = new_pte;
737 }
738
739 err = 0;
740
741 out:
742 spin_unlock(&mm->page_table_lock);
743
744 return err;
745}
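The primary/secondary bucket arithmetic used in hash_huge_page(), pulled out as a sketch (hpt_hash(), htab_hash_mask and HPTES_PER_GROUP come from the surrounding kernel; the helper name is hypothetical):

/* Hypothetical helper mirroring the slot computation above: the
 * secondary bucket is the bitwise complement of the primary hash,
 * and each group holds HPTES_PER_GROUP entries. */
static unsigned long huge_hpte_group(unsigned long vpn, int secondary)
{
	unsigned long hash = hpt_hash(vpn, 1);	/* 1 => large page */

	if (secondary)
		hash = ~hash;
	return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}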
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
deleted file mode 100644
index f4ca29cf5364..000000000000
--- a/arch/ppc64/mm/imalloc.c
+++ /dev/null
@@ -1,312 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/slab.h>
11#include <linux/vmalloc.h>
12
13#include <asm/uaccess.h>
14#include <asm/pgalloc.h>
15#include <asm/pgtable.h>
16#include <asm/semaphore.h>
17#include <asm/imalloc.h>
18#include <asm/cacheflush.h>
19
20static DECLARE_MUTEX(imlist_sem);
21struct vm_struct * imlist = NULL;
22
23static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
24{
25 unsigned long addr;
26 struct vm_struct **p, *tmp;
27
28 addr = ioremap_bot;
29 for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
30 if (size + addr < (unsigned long) tmp->addr)
31 break;
32 if ((unsigned long)tmp->addr >= ioremap_bot)
33 addr = tmp->size + (unsigned long) tmp->addr;
34 if (addr >= IMALLOC_END-size)
35 return 1;
36 }
37 *im_addr = addr;
38
39 return 0;
40}
41
42/* Return whether the region described by v_addr and size is a subset
43 * of the region described by parent
44 */
45static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
46 struct vm_struct *parent)
47{
48 return (int) (v_addr >= (unsigned long) parent->addr &&
49 v_addr < (unsigned long) parent->addr + parent->size &&
50 size < parent->size);
51}
52
53/* Return whether the region described by v_addr and size is a superset
54 * of the region described by child
55 */
56static int im_region_is_superset(unsigned long v_addr, unsigned long size,
57 struct vm_struct *child)
58{
59 struct vm_struct parent;
60
61 parent.addr = (void *) v_addr;
62 parent.size = size;
63
64 return im_region_is_subset((unsigned long) child->addr, child->size,
65 &parent);
66}
67
68/* Return whether the region described by v_addr and size overlaps
69 * the region described by vm. Overlapping regions meet the
70 * following conditions:
71 * 1) The regions share some part of the address space
72 * 2) The regions aren't identical
73 * 3) Neither region is a subset of the other
74 */
75static int im_region_overlaps(unsigned long v_addr, unsigned long size,
76 struct vm_struct *vm)
77{
78 if (im_region_is_superset(v_addr, size, vm))
79 return 0;
80
81 return (v_addr + size > (unsigned long) vm->addr + vm->size &&
82 v_addr < (unsigned long) vm->addr + vm->size) ||
83 (v_addr < (unsigned long) vm->addr &&
84 v_addr + size > (unsigned long) vm->addr);
85}
86
87/* Determine imalloc status of region described by v_addr and size.
88 * Can return one of the following:
89 * IM_REGION_UNUSED - Entire region is unallocated in imalloc space.
90 * IM_REGION_SUBSET - Region is a subset of a region that is already
91 * allocated in imalloc space.
92 * vm will be assigned to a ptr to the parent region.
93 * IM_REGION_EXISTS - Exact region already allocated in imalloc space.
94 * vm will be assigned to a ptr to the existing imlist
95 * member.
96 * IM_REGION_OVERLAP - Region overlaps an allocated region in imalloc space.
97 * IM_REGION_SUPERSET - Region is a superset of a region that is already
98 * allocated in imalloc space.
99 */
100static int im_region_status(unsigned long v_addr, unsigned long size,
101 struct vm_struct **vm)
102{
103 struct vm_struct *tmp;
104
105 for (tmp = imlist; tmp; tmp = tmp->next)
106 if (v_addr < (unsigned long) tmp->addr + tmp->size)
107 break;
108
109 if (tmp) {
110 if (im_region_overlaps(v_addr, size, tmp))
111 return IM_REGION_OVERLAP;
112
113 *vm = tmp;
114 if (im_region_is_subset(v_addr, size, tmp)) {
115 /* Return with tmp pointing to superset */
116 return IM_REGION_SUBSET;
117 }
118 if (im_region_is_superset(v_addr, size, tmp)) {
119 /* Return with tmp pointing to first subset */
120 return IM_REGION_SUPERSET;
121 }
122 else if (v_addr == (unsigned long) tmp->addr &&
123 size == tmp->size) {
124 /* Return with tmp pointing to exact region */
125 return IM_REGION_EXISTS;
126 }
127 }
128
129 *vm = NULL;
130 return IM_REGION_UNUSED;
131}
132
133static struct vm_struct * split_im_region(unsigned long v_addr,
134 unsigned long size, struct vm_struct *parent)
135{
136 struct vm_struct *vm1 = NULL;
137 struct vm_struct *vm2 = NULL;
138 struct vm_struct *new_vm = NULL;
139
140 vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
141 if (vm1 == NULL) {
142 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
143 return NULL;
144 }
145
146 if (v_addr == (unsigned long) parent->addr) {
147 /* Use existing parent vm_struct to represent child, allocate
148 * new one for the remainder of parent range
149 */
150 vm1->size = parent->size - size;
151 vm1->addr = (void *) (v_addr + size);
152 vm1->next = parent->next;
153
154 parent->size = size;
155 parent->next = vm1;
156 new_vm = parent;
157 } else if (v_addr + size == (unsigned long) parent->addr +
158 parent->size) {
159 /* Allocate new vm_struct to represent child, use existing
160 * parent one for remainder of parent range
161 */
162 vm1->size = size;
163 vm1->addr = (void *) v_addr;
164 vm1->next = parent->next;
165 new_vm = vm1;
166
167 parent->size -= size;
168 parent->next = vm1;
169 } else {
170 /* Allocate two new vm_structs for the new child and
171 * uppermost remainder, and use existing parent one for the
172 * lower remainder of parent range
173 */
174 vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
175 if (vm2 == NULL) {
176 printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
177 kfree(vm1);
178 return NULL;
179 }
180
181 vm1->size = size;
182 vm1->addr = (void *) v_addr;
183 vm1->next = vm2;
184 new_vm = vm1;
185
186 vm2->size = ((unsigned long) parent->addr + parent->size) -
187 (v_addr + size);
188 vm2->addr = (void *) v_addr + size;
189 vm2->next = parent->next;
190
191 parent->size = v_addr - (unsigned long) parent->addr;
192 parent->next = vm1;
193 }
194
195 return new_vm;
196}
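A worked example of the three split cases above (all addresses and sizes are hypothetical):

/*
 * Parent region: addr = 0x100000, size = 0x4000.
 *
 * 1. Carve [0x100000, +0x1000): parent shrinks to become the child,
 *    vm1 holds the remainder [0x101000, +0x3000).
 * 2. Carve [0x103000, +0x1000): vm1 becomes the child, parent
 *    shrinks to [0x100000, +0x3000).
 * 3. Carve [0x101000, +0x1000): vm1 is the child, vm2 is the upper
 *    remainder [0x102000, +0x2000), and parent keeps
 *    [0x100000, +0x1000).
 */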
197
198static struct vm_struct * __add_new_im_area(unsigned long req_addr,
199 unsigned long size)
200{
201 struct vm_struct **p, *tmp, *area;
202
203 for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
204 if (req_addr + size <= (unsigned long)tmp->addr)
205 break;
206 }
207
208 area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
209 if (!area)
210 return NULL;
211 area->flags = 0;
212 area->addr = (void *)req_addr;
213 area->size = size;
214 area->next = *p;
215 *p = area;
216
217 return area;
218}
219
220static struct vm_struct * __im_get_area(unsigned long req_addr,
221 unsigned long size,
222 int criteria)
223{
224 struct vm_struct *tmp;
225 int status;
226
227 status = im_region_status(req_addr, size, &tmp);
228 if ((criteria & status) == 0) {
229 return NULL;
230 }
231
232 switch (status) {
233 case IM_REGION_UNUSED:
234 tmp = __add_new_im_area(req_addr, size);
235 break;
236 case IM_REGION_SUBSET:
237 tmp = split_im_region(req_addr, size, tmp);
238 break;
239 case IM_REGION_EXISTS:
240 /* Return requested region */
241 break;
242 case IM_REGION_SUPERSET:
243 /* Return first existing subset of requested region */
244 break;
245 default:
246 printk(KERN_ERR "%s() unexpected imalloc region status\n",
247 __FUNCTION__);
248 tmp = NULL;
249 }
250
251 return tmp;
252}
253
254struct vm_struct * im_get_free_area(unsigned long size)
255{
256 struct vm_struct *area;
257 unsigned long addr;
258
259 down(&imlist_sem);
260 if (get_free_im_addr(size, &addr)) {
261 printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
262 __FUNCTION__, size);
263 area = NULL;
264 goto next_im_done;
265 }
266
267 area = __im_get_area(addr, size, IM_REGION_UNUSED);
268 if (area == NULL) {
269 printk(KERN_ERR
270 "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
271 __FUNCTION__, addr, size);
272 }
273next_im_done:
274 up(&imlist_sem);
275 return area;
276}
277
278struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
279 int criteria)
280{
281 struct vm_struct *area;
282
283 down(&imlist_sem);
284 area = __im_get_area(v_addr, size, criteria);
285 up(&imlist_sem);
286 return area;
287}
288
289void im_free(void * addr)
290{
291 struct vm_struct **p, *tmp;
292
293 if (!addr)
294 return;
295 if ((unsigned long) addr & ~PAGE_MASK) {
296 printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
297 return;
298 }
299 down(&imlist_sem);
300 for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
301 if (tmp->addr == addr) {
302 *p = tmp->next;
303 unmap_vm_area(tmp);
304 kfree(tmp);
305 up(&imlist_sem);
306 return;
307 }
308 }
309 up(&imlist_sem);
310 printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
311 addr);
312}
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
deleted file mode 100644
index e2bd7776622f..000000000000
--- a/arch/ppc64/mm/init.c
+++ /dev/null
@@ -1,950 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76int mem_init_done;
77unsigned long ioremap_bot = IMALLOC_BASE;
78static unsigned long phbs_io_bot = PHBS_IO_BASE;
79
80extern pgd_t swapper_pg_dir[];
81extern struct task_struct *current_set[NR_CPUS];
82
83unsigned long klimit = (unsigned long)_end;
84
85unsigned long _SDR1=0;
86unsigned long _ASR=0;
87
88/* max amount of RAM to use */
89unsigned long __max_memory;
90
91/* info on what we think the IO hole is */
92unsigned long io_hole_start;
93unsigned long io_hole_size;
94
95void show_mem(void)
96{
97 unsigned long total = 0, reserved = 0;
98 unsigned long shared = 0, cached = 0;
99 struct page *page;
100 pg_data_t *pgdat;
101 unsigned long i;
102
103 printk("Mem-info:\n");
104 show_free_areas();
105 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
106 for_each_pgdat(pgdat) {
107 unsigned long flags;
108 pgdat_resize_lock(pgdat, &flags);
109 for (i = 0; i < pgdat->node_spanned_pages; i++) {
110 page = pgdat_page_nr(pgdat, i);
111 total++;
112 if (PageReserved(page))
113 reserved++;
114 else if (PageSwapCache(page))
115 cached++;
116 else if (page_count(page))
117 shared += page_count(page) - 1;
118 }
119 pgdat_resize_unlock(pgdat, &flags);
120 }
121 printk("%ld pages of RAM\n", total);
122 printk("%ld reserved pages\n", reserved);
123 printk("%ld pages shared\n", shared);
124 printk("%ld pages swap cached\n", cached);
125}
126
127#ifdef CONFIG_PPC_ISERIES
128
129void __iomem *ioremap(unsigned long addr, unsigned long size)
130{
131 return (void __iomem *)addr;
132}
133
134extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
135 unsigned long flags)
136{
137 return (void __iomem *)addr;
138}
139
140void iounmap(volatile void __iomem *addr)
141{
142 return;
143}
144
145#else
146
147/*
148 * map_io_page currently only called by __ioremap
149 * map_io_page adds an entry to the ioremap page table
150 * and adds an entry to the HPT, possibly bolting it
151 */
152static int map_io_page(unsigned long ea, unsigned long pa, int flags)
153{
154 pgd_t *pgdp;
155 pud_t *pudp;
156 pmd_t *pmdp;
157 pte_t *ptep;
158 unsigned long vsid;
159
160 if (mem_init_done) {
161 pgdp = pgd_offset_k(ea);
162 pudp = pud_alloc(&init_mm, pgdp, ea);
163 if (!pudp)
164 return -ENOMEM;
165 pmdp = pmd_alloc(&init_mm, pudp, ea);
166 if (!pmdp)
167 return -ENOMEM;
168 ptep = pte_alloc_kernel(pmdp, ea);
169 if (!ptep)
170 return -ENOMEM;
171 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
172 __pgprot(flags)));
173 } else {
174 unsigned long va, vpn, hash, hpteg;
175
176 /*
177 * If the mm subsystem is not fully up, we cannot create a
178 * linux page table entry for this mapping. Simply bolt an
179 * entry in the hardware page table.
180 */
181 vsid = get_kernel_vsid(ea);
182 va = (vsid << 28) | (ea & 0xFFFFFFF);
183 vpn = va >> PAGE_SHIFT;
184
185 hash = hpt_hash(vpn, 0);
186
187 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
188
189 /* Panic if a pte group is full */
190 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
191 HPTE_V_BOLTED,
192 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
193 == -1) {
194 panic("map_io_page: could not insert mapping");
195 }
196 }
197 return 0;
198}
199
200
201static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
202 unsigned long ea, unsigned long size,
203 unsigned long flags)
204{
205 unsigned long i;
206
207 if ((flags & _PAGE_PRESENT) == 0)
208 flags |= pgprot_val(PAGE_KERNEL);
209
210 for (i = 0; i < size; i += PAGE_SIZE)
211 if (map_io_page(ea+i, pa+i, flags))
212 return NULL;
213
214 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
215}
216
217
218void __iomem *
219ioremap(unsigned long addr, unsigned long size)
220{
221 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
222}
223
224void __iomem * __ioremap(unsigned long addr, unsigned long size,
225 unsigned long flags)
226{
227 unsigned long pa, ea;
228 void __iomem *ret;
229
230 /*
231 * Choose an address to map it to.
232 * Once the imalloc system is running, we use it.
233 * Before that, we map using addresses going
234 * up from ioremap_bot. imalloc will use
235 * the addresses from ioremap_bot through
236 * IMALLOC_END
237 *
238 */
239 pa = addr & PAGE_MASK;
240 size = PAGE_ALIGN(addr + size) - pa;
241
242 if (size == 0)
243 return NULL;
244
245 if (mem_init_done) {
246 struct vm_struct *area;
247 area = im_get_free_area(size);
248 if (area == NULL)
249 return NULL;
250 ea = (unsigned long)(area->addr);
251 ret = __ioremap_com(addr, pa, ea, size, flags);
252 if (!ret)
253 im_free(area->addr);
254 } else {
255 ea = ioremap_bot;
256 ret = __ioremap_com(addr, pa, ea, size, flags);
257 if (ret)
258 ioremap_bot += size;
259 }
260 return ret;
261}
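Typical driver-side use of the interface above, as a sketch (the physical address, size and register offset are hypothetical):

/* Map 4K of device registers uncached/guarded, write one register,
 * then tear the mapping down again. */
static int example_map_regs(void)
{
	void __iomem *regs = ioremap(0xf8000000UL, 0x1000);

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs + 0x10);	/* hypothetical device register */
	iounmap(regs);
	return 0;
}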
262
263#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
264
265int __ioremap_explicit(unsigned long pa, unsigned long ea,
266 unsigned long size, unsigned long flags)
267{
268 struct vm_struct *area;
269 void __iomem *ret;
270
271 /* For now, require page-aligned values for pa, ea, and size */
272 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
273 !IS_PAGE_ALIGNED(size)) {
274 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
275 return 1;
276 }
277
278 if (!mem_init_done) {
279 /* Two things to consider in this case:
280 * 1) No records will be kept (imalloc, etc) that the region
281 * has been remapped
282 * 2) It won't be easy to iounmap() the region later (because
283 * of 1)
284 */
285 ;
286 } else {
287 area = im_get_area(ea, size,
288 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
289 if (area == NULL) {
290 /* Expected when PHB-dlpar is in play */
291 return 1;
292 }
293 if (ea != (unsigned long) area->addr) {
294 printk(KERN_ERR "unexpected addr return from "
295 "im_get_area\n");
296 return 1;
297 }
298 }
299
300 ret = __ioremap_com(pa, pa, ea, size, flags);
301 if (ret == NULL) {
302 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
303 return 1;
304 }
305 if (ret != (void *) ea) {
306 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
307 return 1;
308 }
309
310 return 0;
311}
312
313/*
314 * Unmap an IO region and remove it from imalloc'd list.
315 * Access to IO memory should be serialized by driver.
316 * This code is modeled after vmalloc code - unmap_vm_area()
317 *
318 * XXX what about calls before mem_init_done (ie python_countermeasures())
319 */
320void iounmap(volatile void __iomem *token)
321{
322 void *addr;
323
324 if (!mem_init_done)
325 return;
326
327 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
328
329 im_free(addr);
330}
331
332static int iounmap_subset_regions(unsigned long addr, unsigned long size)
333{
334 struct vm_struct *area;
335
336 /* Check whether subsets of this region exist */
337 area = im_get_area(addr, size, IM_REGION_SUPERSET);
338 if (area == NULL)
339 return 1;
340
341 while (area) {
342 iounmap((void __iomem *) area->addr);
343 area = im_get_area(addr, size,
344 IM_REGION_SUPERSET);
345 }
346
347 return 0;
348}
349
350int iounmap_explicit(volatile void __iomem *start, unsigned long size)
351{
352 struct vm_struct *area;
353 unsigned long addr;
354 int rc;
355
356 addr = (unsigned long __force) start & PAGE_MASK;
357
358 /* Verify that the region either exists or is a subset of an existing
359 * region. In the latter case, split the parent region to create
360 * the exact region
361 */
362 area = im_get_area(addr, size,
363 IM_REGION_EXISTS | IM_REGION_SUBSET);
364 if (area == NULL) {
365 /* Determine whether subset regions exist. If so, unmap */
366 rc = iounmap_subset_regions(addr, size);
367 if (rc) {
368 printk(KERN_ERR
369 "%s() cannot unmap nonexistent range 0x%lx\n",
370 __FUNCTION__, addr);
371 return 1;
372 }
373 } else {
374 iounmap((void __iomem *) area->addr);
375 }
376 /*
377 * FIXME! This can't be right:
378 iounmap(area->addr);
379 * Maybe it should be "iounmap(area);"
380 */
381 return 0;
382}
383
384#endif
385
386EXPORT_SYMBOL(ioremap);
387EXPORT_SYMBOL(__ioremap);
388EXPORT_SYMBOL(iounmap);
389
390void free_initmem(void)
391{
392 unsigned long addr;
393
394 addr = (unsigned long)__init_begin;
395 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
396 memset((void *)addr, 0xcc, PAGE_SIZE);
397 ClearPageReserved(virt_to_page(addr));
398 set_page_count(virt_to_page(addr), 1);
399 free_page(addr);
400 totalram_pages++;
401 }
402 printk ("Freeing unused kernel memory: %luk freed\n",
403 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
404}
405
406#ifdef CONFIG_BLK_DEV_INITRD
407void free_initrd_mem(unsigned long start, unsigned long end)
408{
409 if (start < end)
410 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
411 for (; start < end; start += PAGE_SIZE) {
412 ClearPageReserved(virt_to_page(start));
413 set_page_count(virt_to_page(start), 1);
414 free_page(start);
415 totalram_pages++;
416 }
417}
418#endif
419
420static DEFINE_SPINLOCK(mmu_context_lock);
421static DEFINE_IDR(mmu_context_idr);
422
423int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
424{
425 int index;
426 int err;
427
428again:
429 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
430 return -ENOMEM;
431
432 spin_lock(&mmu_context_lock);
433 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
434 spin_unlock(&mmu_context_lock);
435
436 if (err == -EAGAIN)
437 goto again;
438 else if (err)
439 return err;
440
441 if (index > MAX_CONTEXT) {
442 idr_remove(&mmu_context_idr, index);
443 return -ENOMEM;
444 }
445
446 mm->context.id = index;
447
448 return 0;
449}
450
451void destroy_context(struct mm_struct *mm)
452{
453 spin_lock(&mmu_context_lock);
454 idr_remove(&mmu_context_idr, mm->context.id);
455 spin_unlock(&mmu_context_lock);
456
457 mm->context.id = NO_CONTEXT;
458}
459
460/*
461 * Do very early mm setup.
462 */
463void __init mm_init_ppc64(void)
464{
465#ifndef CONFIG_PPC_ISERIES
466 unsigned long i;
467#endif
468
469 ppc64_boot_msg(0x100, "MM Init");
470
471 /* This is the story of the IO hole... please, keep seated,
472 * unfortunately, we are out of oxygen masks at the moment.
473 * So we need some rough way to tell where your big IO hole
474 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
475 * that area as well, on POWER4 we don't have one, etc...
476 * We need that as a "hint" when sizing the TCE table on POWER3.
477 * So far, the simplest way that seems to work well enough for us is
478 * to just assume that the first discontinuity in our physical
479 * RAM layout is the IO hole. That may not be correct in the future
480 * (and isn't on iSeries but then we don't care ;)
481 */
482
483#ifndef CONFIG_PPC_ISERIES
484 for (i = 1; i < lmb.memory.cnt; i++) {
485 unsigned long base, prevbase, prevsize;
486
487 prevbase = lmb.memory.region[i-1].base;
488 prevsize = lmb.memory.region[i-1].size;
489 base = lmb.memory.region[i].base;
490 if (base > (prevbase + prevsize)) {
491 io_hole_start = prevbase + prevsize;
492 io_hole_size = base - (prevbase + prevsize);
493 break;
494 }
495 }
496#endif /* CONFIG_PPC_ISERIES */
497 if (io_hole_start)
498 printk("IO Hole assumed to be %lx -> %lx\n",
499 io_hole_start, io_hole_start + io_hole_size - 1);
500
501 ppc64_boot_msg(0x100, "MM Init Done");
502}
503
504/*
505 * This is called by /dev/mem to know if a given address has to
506 * be mapped non-cacheable or not
507 */
508int page_is_ram(unsigned long pfn)
509{
510 int i;
511 unsigned long paddr = (pfn << PAGE_SHIFT);
512
513 for (i=0; i < lmb.memory.cnt; i++) {
514 unsigned long base;
515
516 base = lmb.memory.region[i].base;
517
518 if ((paddr >= base) &&
519 (paddr < (base + lmb.memory.region[i].size))) {
520 return 1;
521 }
522 }
523
524 return 0;
525}
526EXPORT_SYMBOL(page_is_ram);
527
528/*
529 * Initialize the bootmem system and give it all the memory we
530 * have available.
531 */
532#ifndef CONFIG_NEED_MULTIPLE_NODES
533void __init do_init_bootmem(void)
534{
535 unsigned long i;
536 unsigned long start, bootmap_pages;
537 unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
538 int boot_mapsize;
539
540 /*
541 * Find an area to use for the bootmem bitmap. Calculate the size of
542 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
543 * Add 1 additional page in case the address isn't page-aligned.
544 */
545 bootmap_pages = bootmem_bootmap_pages(total_pages);
546
547 start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
548 BUG_ON(!start);
549
550 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
551
552 max_pfn = max_low_pfn;
553
554 /* Add all physical memory to the bootmem map, mark each area
555 * present.
556 */
557 for (i=0; i < lmb.memory.cnt; i++)
558 free_bootmem(lmb.memory.region[i].base,
559 lmb_size_bytes(&lmb.memory, i));
560
561 /* reserve the sections we're already using */
562 for (i=0; i < lmb.reserved.cnt; i++)
563 reserve_bootmem(lmb.reserved.region[i].base,
564 lmb_size_bytes(&lmb.reserved, i));
565
566 for (i=0; i < lmb.memory.cnt; i++)
567 memory_present(0, lmb_start_pfn(&lmb.memory, i),
568 lmb_end_pfn(&lmb.memory, i));
569}
570
571/*
572 * paging_init() sets up the page tables - in fact we've already done this.
573 */
574void __init paging_init(void)
575{
576 unsigned long zones_size[MAX_NR_ZONES];
577 unsigned long zholes_size[MAX_NR_ZONES];
578 unsigned long total_ram = lmb_phys_mem_size();
579 unsigned long top_of_ram = lmb_end_of_DRAM();
580
581 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
582 top_of_ram, total_ram);
583 printk(KERN_INFO "Memory hole size: %ldMB\n",
584 (top_of_ram - total_ram) >> 20);
585 /*
586 * All pages are DMA-able so we put them all in the DMA zone.
587 */
588 memset(zones_size, 0, sizeof(zones_size));
589 memset(zholes_size, 0, sizeof(zholes_size));
590
591 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
592 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
593
594 free_area_init_node(0, NODE_DATA(0), zones_size,
595 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
596}
597#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
598
599static struct kcore_list kcore_vmem;
600
601static int __init setup_kcore(void)
602{
603 int i;
604
605 for (i=0; i < lmb.memory.cnt; i++) {
606 unsigned long base, size;
607 struct kcore_list *kcore_mem;
608
609 base = lmb.memory.region[i].base;
610 size = lmb.memory.region[i].size;
611
612 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
613 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
614 if (!kcore_mem)
615 panic("mem_init: kmalloc failed\n");
616
617 kclist_add(kcore_mem, __va(base), size);
618 }
619
620 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
621
622 return 0;
623}
624module_init(setup_kcore);
625
626void __init mem_init(void)
627{
628#ifdef CONFIG_NEED_MULTIPLE_NODES
629 int nid;
630#endif
631 pg_data_t *pgdat;
632 unsigned long i;
633 struct page *page;
634 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
635
636 num_physpages = max_low_pfn; /* RAM is assumed contiguous */
637 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
638
639#ifdef CONFIG_NEED_MULTIPLE_NODES
640 for_each_online_node(nid) {
641 if (NODE_DATA(nid)->node_spanned_pages != 0) {
642 printk("freeing bootmem node %x\n", nid);
643 totalram_pages +=
644 free_all_bootmem_node(NODE_DATA(nid));
645 }
646 }
647#else
648 max_mapnr = num_physpages;
649 totalram_pages += free_all_bootmem();
650#endif
651
652 for_each_pgdat(pgdat) {
653 unsigned long flags;
654 pgdat_resize_lock(pgdat, &flags);
655 for (i = 0; i < pgdat->node_spanned_pages; i++) {
656 page = pgdat_page_nr(pgdat, i);
657 if (PageReserved(page))
658 reservedpages++;
659 }
660 pgdat_resize_unlock(pgdat, &flags);
661 }
662
663 codesize = (unsigned long)&_etext - (unsigned long)&_stext;
664 initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
665 datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
666 bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
667
668 printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
669 "%luk reserved, %luk data, %luk bss, %luk init)\n",
670 (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
671 num_physpages << (PAGE_SHIFT-10),
672 codesize >> 10,
673 reservedpages << (PAGE_SHIFT-10),
674 datasize >> 10,
675 bsssize >> 10,
676 initsize >> 10);
677
678 mem_init_done = 1;
679
680 /* Initialize the vDSO */
681 vdso_init();
682}
683
684/*
685 * This is called when a page has been modified by the kernel.
686 * It just marks the page as not i-cache clean. We do the i-cache
687 * flush later when the page is given to a user process, if necessary.
688 */
689void flush_dcache_page(struct page *page)
690{
691 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
692 return;
693 /* avoid an atomic op if possible */
694 if (test_bit(PG_arch_1, &page->flags))
695 clear_bit(PG_arch_1, &page->flags);
696}
697EXPORT_SYMBOL(flush_dcache_page);
698
699void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
700{
701 clear_page(page);
702
703 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
704 return;
705 /*
706 * We shouldn't have to do this, but some versions of glibc
707 * require it (ld.so assumes zero filled pages are icache clean)
708 * - Anton
709 */
710
711 /* avoid an atomic op if possible */
712 if (test_bit(PG_arch_1, &pg->flags))
713 clear_bit(PG_arch_1, &pg->flags);
714}
715EXPORT_SYMBOL(clear_user_page);
716
717void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
718 struct page *pg)
719{
720 copy_page(vto, vfrom);
721
722 /*
723 * We should be able to use the following optimisation, however
724 * there are two problems.
725 * Firstly a bug in some versions of binutils meant PLT sections
726 * were not marked executable.
727 * Secondly the first word in the GOT section is blrl, used
728 * to establish the GOT address. Until recently the GOT was
729 * not marked executable.
730 * - Anton
731 */
732#if 0
733 if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
734 return;
735#endif
736
737 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
738 return;
739
740 /* avoid an atomic op if possible */
741 if (test_bit(PG_arch_1, &pg->flags))
742 clear_bit(PG_arch_1, &pg->flags);
743}
744
745void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
746 unsigned long addr, int len)
747{
748 unsigned long maddr;
749
750 maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
751 flush_icache_range(maddr, maddr + len);
752}
753EXPORT_SYMBOL(flush_icache_user_range);
754
755/*
756 * This is called at the end of handling a user page fault, when the
757 * fault has been handled by updating a PTE in the linux page tables.
758 * We use it to preload an HPTE into the hash table corresponding to
759 * the updated linux PTE.
760 *
761 * This must always be called with the mm->page_table_lock held
762 */
763void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
764 pte_t pte)
765{
766 unsigned long vsid;
767 void *pgdir;
768 pte_t *ptep;
769 int local = 0;
770 cpumask_t tmp;
771 unsigned long flags;
772
773 /* handle i-cache coherency */
774 if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
775 !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
776 unsigned long pfn = pte_pfn(pte);
777 if (pfn_valid(pfn)) {
778 struct page *page = pfn_to_page(pfn);
779 if (!PageReserved(page)
780 && !test_bit(PG_arch_1, &page->flags)) {
781 __flush_dcache_icache(page_address(page));
782 set_bit(PG_arch_1, &page->flags);
783 }
784 }
785 }
786
787 /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
788 if (!pte_young(pte))
789 return;
790
791 pgdir = vma->vm_mm->pgd;
792 if (pgdir == NULL)
793 return;
794
795 ptep = find_linux_pte(pgdir, ea);
796 if (!ptep)
797 return;
798
799 vsid = get_vsid(vma->vm_mm->context.id, ea);
800
801 local_irq_save(flags);
802 tmp = cpumask_of_cpu(smp_processor_id());
803 if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
804 local = 1;
805
806 __hash_page(ea, 0, vsid, ptep, 0x300, local);
807 local_irq_restore(flags);
808}
809
810void __iomem * reserve_phb_iospace(unsigned long size)
811{
812 void __iomem *virt_addr;
813
814 if (phbs_io_bot >= IMALLOC_BASE)
815 panic("reserve_phb_iospace(): phb io space overflow\n");
816
817 virt_addr = (void __iomem *) phbs_io_bot;
818 phbs_io_bot += size;
819
820 return virt_addr;
821}
822
823static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
824{
825 memset(addr, 0, kmem_cache_size(cache));
826}
827
828static const int pgtable_cache_size[2] = {
829 PTE_TABLE_SIZE, PMD_TABLE_SIZE
830};
831static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
832 "pgd_pte_cache", "pud_pmd_cache",
833};
834
835kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
836
837void pgtable_cache_init(void)
838{
839 int i;
840
841 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
842 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
843 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
844 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
845
846 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
847 int size = pgtable_cache_size[i];
848 const char *name = pgtable_cache_name[i];
849
850 pgtable_cache[i] = kmem_cache_create(name,
851 size, size,
852 SLAB_HWCACHE_ALIGN
853 | SLAB_MUST_HWCACHE_ALIGN,
854 zero_ctor,
855 NULL);
856 if (! pgtable_cache[i])
857 panic("pgtable_cache_init(): could not create %s!\n",
858 name);
859 }
860}
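A sketch of how these caches are consumed by the page-table allocators (the cache indices are the ones checked above; the snippet itself is illustrative):

/* Objects come back pre-zeroed courtesy of zero_ctor(). */
pgd_t *pgd = kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
if (pgd)
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);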
861
862pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
863 unsigned long size, pgprot_t vma_prot)
864{
865 if (ppc_md.phys_mem_access_prot)
866 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
867
868 if (!page_is_ram(addr >> PAGE_SHIFT))
869 vma_prot = __pgprot(pgprot_val(vma_prot)
870 | _PAGE_GUARDED | _PAGE_NO_CACHE);
871 return vma_prot;
872}
873EXPORT_SYMBOL(phys_mem_access_prot);
874
875#ifdef CONFIG_MEMORY_HOTPLUG
876
877void online_page(struct page *page)
878{
879 ClearPageReserved(page);
880 free_cold_page(page);
881 totalram_pages++;
882 num_physpages++;
883}
884
885/*
886 * This works only for the non-NUMA case. Later, we'll need a lookup
887 * to convert from real physical addresses to nid, one that does not
888 * use pfn_to_nid().
889 */
890int __devinit add_memory(u64 start, u64 size)
891{
892 struct pglist_data *pgdata = NODE_DATA(0);
893 struct zone *zone;
894 unsigned long start_pfn = start >> PAGE_SHIFT;
895 unsigned long nr_pages = size >> PAGE_SHIFT;
896
897 /* this should work for most non-highmem platforms */
898 zone = pgdata->node_zones;
899
900 return __add_pages(zone, start_pfn, nr_pages);
903}
904
905/*
906 * First pass at this code will check to determine if the remove
907 * request is within the RMO. Do not allow removal within the RMO.
908 */
909int __devinit remove_memory(u64 start, u64 size)
910{
911 struct zone *zone;
912 unsigned long start_pfn, end_pfn, nr_pages;
913
914 start_pfn = start >> PAGE_SHIFT;
915 nr_pages = size >> PAGE_SHIFT;
916 end_pfn = start_pfn + nr_pages;
917
918 printk("%s(): Attempting to remove memory in range "
919 "%lx to %lx\n", __func__, start, start+size);
920 /*
921 * check for range within RMO
922 */
923 zone = page_zone(pfn_to_page(start_pfn));
924
925 printk("%s(): memory will be removed from "
926 "the %s zone\n", __func__, zone->name);
927
928 /*
929 * not handling removing memory ranges that
930 * overlap multiple zones yet
931 */
932 if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
933 goto overlap;
934
935 /* make sure it is NOT in RMO */
936 if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
937 printk("%s(): range to be removed must NOT be in RMO!\n",
938 __func__);
939 goto in_rmo;
940 }
941
942 return __remove_pages(zone, start_pfn, nr_pages);
943
944overlap:
945 printk("%s(): memory range to be removed overlaps "
946 "multiple zones!!!\n", __func__);
947in_rmo:
948 return -1;
949}
950#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/ppc64/mm/mmap.c b/arch/ppc64/mm/mmap.c
deleted file mode 100644
index fe65f522aff3..000000000000
--- a/arch/ppc64/mm/mmap.c
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * linux/arch/ppc64/mm/mmap.c
3 *
4 * flexible mmap layout support
5 *
6 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
7 * All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 *
24 * Started by Ingo Molnar <mingo@elte.hu>
25 */
26
27#include <linux/personality.h>
28#include <linux/mm.h>
29
30/*
31 * Top of mmap area (just below the process stack).
32 *
33 * Leave at least a ~128 MB hole.
34 */
35#define MIN_GAP (128*1024*1024)
36#define MAX_GAP (TASK_SIZE/6*5)
37
38static inline unsigned long mmap_base(void)
39{
40 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
41
42 if (gap < MIN_GAP)
43 gap = MIN_GAP;
44 else if (gap > MAX_GAP)
45 gap = MAX_GAP;
46
47 return TASK_SIZE - (gap & PAGE_MASK);
48}
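Worked numbers for the clamp above (the rlimit values are hypothetical):

/*
 * RLIMIT_STACK = 8MB  -> gap is clamped up to MIN_GAP (128MB),
 *                        so base = TASK_SIZE - 128MB.
 * RLIMIT_STACK = 1GB  -> already within [MIN_GAP, MAX_GAP],
 *                        so base = TASK_SIZE - 1GB (page-masked).
 */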
49
50static inline int mmap_is_legacy(void)
51{
52 /*
53 * Force standard allocation for 64 bit programs.
54 */
55 if (!test_thread_flag(TIF_32BIT))
56 return 1;
57
58 if (current->personality & ADDR_COMPAT_LAYOUT)
59 return 1;
60
61 if (current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY)
62 return 1;
63
64 return sysctl_legacy_va_layout;
65}
66
67/*
68 * This function, called very early during the creation of a new
69 * process VM image, sets up which VM layout function to use:
70 */
71void arch_pick_mmap_layout(struct mm_struct *mm)
72{
73 /*
74 * Fall back to the standard layout if the personality
75 * bit is set, or if the expected stack growth is unlimited:
76 */
77 if (mmap_is_legacy()) {
78 mm->mmap_base = TASK_UNMAPPED_BASE;
79 mm->get_unmapped_area = arch_get_unmapped_area;
80 mm->unmap_area = arch_unmap_area;
81 } else {
82 mm->mmap_base = mmap_base();
83 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
84 mm->unmap_area = arch_unmap_area_topdown;
85 }
86}
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
deleted file mode 100644
index cb864b8f2750..000000000000
--- a/arch/ppc64/mm/numa.c
+++ /dev/null
@@ -1,779 +0,0 @@
1/*
2 * pSeries NUMA support
3 *
4 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/threads.h>
12#include <linux/bootmem.h>
13#include <linux/init.h>
14#include <linux/mm.h>
15#include <linux/mmzone.h>
16#include <linux/module.h>
17#include <linux/nodemask.h>
18#include <linux/cpu.h>
19#include <linux/notifier.h>
20#include <asm/lmb.h>
21#include <asm/machdep.h>
22#include <asm/abs_addr.h>
23
24static int numa_enabled = 1;
25
26static int numa_debug;
27#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
28
29#ifdef DEBUG_NUMA
30#define ARRAY_INITIALISER -1
31#else
32#define ARRAY_INITIALISER 0
33#endif
34
35int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] =
36 ARRAY_INITIALISER};
37char *numa_memory_lookup_table;
38cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
39int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES -1)] = 0};
40
41struct pglist_data *node_data[MAX_NUMNODES];
42bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
43static int min_common_depth;
44
45/*
46 * We need somewhere to store start/span for each node until we have
47 * allocated the real node_data structures.
48 */
49static struct {
50 unsigned long node_start_pfn;
51 unsigned long node_end_pfn;
52 unsigned long node_present_pages;
53} init_node_data[MAX_NUMNODES] __initdata;
54
55EXPORT_SYMBOL(node_data);
56EXPORT_SYMBOL(numa_cpu_lookup_table);
57EXPORT_SYMBOL(numa_memory_lookup_table);
58EXPORT_SYMBOL(numa_cpumask_lookup_table);
59EXPORT_SYMBOL(nr_cpus_in_node);
60
61static inline void map_cpu_to_node(int cpu, int node)
62{
63 numa_cpu_lookup_table[cpu] = node;
64 if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
65 cpu_set(cpu, numa_cpumask_lookup_table[node]);
66 nr_cpus_in_node[node]++;
67 }
68}
69
70#ifdef CONFIG_HOTPLUG_CPU
71static void unmap_cpu_from_node(unsigned long cpu)
72{
73 int node = numa_cpu_lookup_table[cpu];
74
75 dbg("removing cpu %lu from node %d\n", cpu, node);
76
77 if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
78 cpu_clear(cpu, numa_cpumask_lookup_table[node]);
79 nr_cpus_in_node[node]--;
80 } else {
81 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
82 cpu, node);
83 }
84}
85#endif /* CONFIG_HOTPLUG_CPU */
86
87static struct device_node * __devinit find_cpu_node(unsigned int cpu)
88{
89 unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
90 struct device_node *cpu_node = NULL;
91 unsigned int *interrupt_server, *reg;
92 int len;
93
94 while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
95 /* Try interrupt server first */
96 interrupt_server = (unsigned int *)get_property(cpu_node,
97 "ibm,ppc-interrupt-server#s", &len);
98
99 len = len / sizeof(u32);
100
101 if (interrupt_server && (len > 0)) {
102 while (len--) {
103 if (interrupt_server[len] == hw_cpuid)
104 return cpu_node;
105 }
106 } else {
107 reg = (unsigned int *)get_property(cpu_node,
108 "reg", &len);
109 if (reg && (len > 0) && (reg[0] == hw_cpuid))
110 return cpu_node;
111 }
112 }
113
114 return NULL;
115}
116
117/* must hold reference to node during call */
118static int *of_get_associativity(struct device_node *dev)
119{
120 return (unsigned int *)get_property(dev, "ibm,associativity", NULL);
121}
122
123static int of_node_numa_domain(struct device_node *device)
124{
125 int numa_domain;
126 unsigned int *tmp;
127
128 if (min_common_depth == -1)
129 return 0;
130
131 tmp = of_get_associativity(device);
132 if (tmp && (tmp[0] >= min_common_depth)) {
133 numa_domain = tmp[min_common_depth];
134 } else {
135 dbg("WARNING: no NUMA information for %s\n",
136 device->full_name);
137 numa_domain = 0;
138 }
139 return numa_domain;
140}
141
142/*
143 * In theory, the "ibm,associativity" property may contain multiple
144 * associativity lists because a resource may be multiply connected
145 * into the machine. This resource then has different associativity
146 * characteristics relative to its multiple connections. We ignore
147 * this for now. We also assume that all cpu and memory sets have
148 * their distances represented at a common level. This won't be
149 * true for hierarchical NUMA.
150 *
151 * In any case the ibm,associativity-reference-points should give
152 * the correct depth for a normal NUMA system.
153 *
154 * - Dave Hansen <haveblue@us.ibm.com>
155 */
156static int __init find_min_common_depth(void)
157{
158 int depth;
159 unsigned int *ref_points;
160 struct device_node *rtas_root;
161 unsigned int len;
162
163 rtas_root = of_find_node_by_path("/rtas");
164
165 if (!rtas_root)
166 return -1;
167
168 /*
169 * this property is 2 32-bit integers, each representing a level of
170 * depth in the associativity nodes. The first is for an SMP
171 * configuration (should be all 0's) and the second is for a normal
172 * NUMA configuration.
173 */
174 ref_points = (unsigned int *)get_property(rtas_root,
175 "ibm,associativity-reference-points", &len);
176
177 if (ref_points && (len >= 2 * sizeof(unsigned int))) {
178 depth = ref_points[1];
179 } else {
180 dbg("WARNING: could not find NUMA "
181 "associativity reference point\n");
182 depth = -1;
183 }
184 of_node_put(rtas_root);
185
186 return depth;
187}
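
A hedged illustration with invented property values: if
"ibm,associativity-reference-points" were <4 4>, ref_points[1] selects depth 4,
and a cpu whose "ibm,associativity" is <5 0 0 1 2 7> resolves like this:

    /* tmp[0] == 5 entries follow and tmp[0] >= min_common_depth, so: */
    numa_domain = tmp[min_common_depth];    /* tmp[4] == 2 */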
188
189static int __init get_mem_addr_cells(void)
190{
191 struct device_node *memory = NULL;
192 int rc;
193
194 memory = of_find_node_by_type(memory, "memory");
195 if (!memory)
196 return 0; /* it won't matter */
197
198 rc = prom_n_addr_cells(memory);
199 return rc;
200}
201
202static int __init get_mem_size_cells(void)
203{
204 struct device_node *memory = NULL;
205 int rc;
206
207 memory = of_find_node_by_type(memory, "memory");
208 if (!memory)
209 return 0; /* it won't matter */
210 rc = prom_n_size_cells(memory);
211 return rc;
212}
213
214static unsigned long read_n_cells(int n, unsigned int **buf)
215{
216 unsigned long result = 0;
217
218 while (n--) {
219 result = (result << 32) | **buf;
220 (*buf)++;
221 }
222 return result;
223}
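
A quick sketch (cell values invented): decoding one base/size pair from a
"reg" property with two address cells and two size cells consumes four cells
and leaves the buffer pointer past them:

    unsigned int cells[] = { 0x1, 0x0, 0x0, 0x10000000 };
    unsigned int *p = cells;
    unsigned long base = read_n_cells(2, &p);   /* 0x100000000 */
    unsigned long size = read_n_cells(2, &p);   /* 0x10000000 */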
224
225/*
226 * Figure out to which domain a cpu belongs and stick it there.
227 * Return the id of the domain used.
228 */
229static int numa_setup_cpu(unsigned long lcpu)
230{
231 int numa_domain = 0;
232 struct device_node *cpu = find_cpu_node(lcpu);
233
234 if (!cpu) {
235 WARN_ON(1);
236 goto out;
237 }
238
239 numa_domain = of_node_numa_domain(cpu);
240
241 if (numa_domain >= num_online_nodes()) {
242 /*
243 * POWER4 LPAR uses 0xffff as invalid node,
244 * don't warn in this case.
245 */
246 if (numa_domain != 0xffff)
247 printk(KERN_ERR "WARNING: cpu %ld "
248 "maps to invalid NUMA node %d\n",
249 lcpu, numa_domain);
250 numa_domain = 0;
251 }
252out:
253 node_set_online(numa_domain);
254
255 map_cpu_to_node(lcpu, numa_domain);
256
257 of_node_put(cpu);
258
259 return numa_domain;
260}
261
262static int cpu_numa_callback(struct notifier_block *nfb,
263 unsigned long action,
264 void *hcpu)
265{
266 unsigned long lcpu = (unsigned long)hcpu;
267 int ret = NOTIFY_DONE;
268
269 switch (action) {
270 case CPU_UP_PREPARE:
271 if (min_common_depth == -1 || !numa_enabled)
272 map_cpu_to_node(lcpu, 0);
273 else
274 numa_setup_cpu(lcpu);
275 ret = NOTIFY_OK;
276 break;
277#ifdef CONFIG_HOTPLUG_CPU
278 case CPU_DEAD:
279 case CPU_UP_CANCELED:
280 unmap_cpu_from_node(lcpu);
281 ret = NOTIFY_OK;
282 break;
283#endif
284 }
285 return ret;
286}
287
288/*
289 * Check and possibly modify a memory region to enforce the memory limit.
290 *
291 * Returns the size the region should have to enforce the memory limit.
292 * This will either be the original value of size, a truncated value,
293 * or zero. If the returned value of size is 0 the region should be
294 * discarded as it lies wholly above the memory limit.
295 */
296static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size)
297{
298 /*
299 * We use lmb_end_of_DRAM() in here instead of memory_limit because
300 * we've already adjusted it for the limit and it takes care of
301 * having memory holes below the limit.
302 */
303 extern unsigned long memory_limit;
304
305 if (! memory_limit)
306 return size;
307
308 if (start + size <= lmb_end_of_DRAM())
309 return size;
310
311 if (start >= lmb_end_of_DRAM())
312 return 0;
313
314 return lmb_end_of_DRAM() - start;
315}
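
A worked example, assuming lmb_end_of_DRAM() returns 0x40000000 under some
invented memory_limit:

    numa_enforce_memory_limit(0x10000000, 0x10000000); /* == 0x10000000: wholly below, kept   */
    numa_enforce_memory_limit(0x30000000, 0x20000000); /* == 0x10000000: straddles, truncated */
    numa_enforce_memory_limit(0x50000000, 0x10000000); /* == 0: wholly above, discarded       */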
316
317static int __init parse_numa_properties(void)
318{
319 struct device_node *cpu = NULL;
320 struct device_node *memory = NULL;
321 int addr_cells, size_cells;
322 int max_domain = 0;
323 long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
324 unsigned long i;
325
326 if (numa_enabled == 0) {
327 printk(KERN_WARNING "NUMA disabled by user\n");
328 return -1;
329 }
330
331 numa_memory_lookup_table =
332 (char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
333 memset(numa_memory_lookup_table, 0, entries * sizeof(char));
334
335 for (i = 0; i < entries ; i++)
336 numa_memory_lookup_table[i] = ARRAY_INITIALISER;
337
338 min_common_depth = find_min_common_depth();
339
340 dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
341 if (min_common_depth < 0)
342 return min_common_depth;
343
344 max_domain = numa_setup_cpu(boot_cpuid);
345
346 /*
347 * Even though we connect cpus to numa domains later in SMP init,
348 * we need to know the maximum node id now. This is because each
349 * node id must have NODE_DATA etc backing it.
350 * As a result of hotplug we could still have cpus appear later on
351 * with larger node ids. In that case we force the cpu into node 0.
352 */
353 for_each_cpu(i) {
354 int numa_domain;
355
356 cpu = find_cpu_node(i);
357
358 if (cpu) {
359 numa_domain = of_node_numa_domain(cpu);
360 of_node_put(cpu);
361
362 if (numa_domain < MAX_NUMNODES &&
363 max_domain < numa_domain)
364 max_domain = numa_domain;
365 }
366 }
367
368 addr_cells = get_mem_addr_cells();
369 size_cells = get_mem_size_cells();
370 memory = NULL;
371 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
372 unsigned long start;
373 unsigned long size;
374 int numa_domain;
375 int ranges;
376 unsigned int *memcell_buf;
377 unsigned int len;
378
379 memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
380 if (!memcell_buf || len <= 0)
381 continue;
382
383 ranges = memory->n_addrs;
384new_range:
385 /* these are order-sensitive, and modify the buffer pointer */
386 start = read_n_cells(addr_cells, &memcell_buf);
387 size = read_n_cells(size_cells, &memcell_buf);
388
389 start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
390 size = _ALIGN_UP(size, MEMORY_INCREMENT);
391
392 numa_domain = of_node_numa_domain(memory);
393
394 if (numa_domain >= MAX_NUMNODES) {
395 if (numa_domain != 0xffff)
396 printk(KERN_ERR "WARNING: memory at %lx maps "
397 "to invalid NUMA node %d\n", start,
398 numa_domain);
399 numa_domain = 0;
400 }
401
402 if (max_domain < numa_domain)
403 max_domain = numa_domain;
404
405 if (! (size = numa_enforce_memory_limit(start, size))) {
406 if (--ranges)
407 goto new_range;
408 else
409 continue;
410 }
411
412 /*
413 * Initialize new node struct, or add to an existing one.
414 */
415 if (init_node_data[numa_domain].node_end_pfn) {
416 if ((start / PAGE_SIZE) <
417 init_node_data[numa_domain].node_start_pfn)
418 init_node_data[numa_domain].node_start_pfn =
419 start / PAGE_SIZE;
420 if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
421 init_node_data[numa_domain].node_end_pfn)
422 init_node_data[numa_domain].node_end_pfn =
423 (start / PAGE_SIZE) +
424 (size / PAGE_SIZE);
425
426 init_node_data[numa_domain].node_present_pages +=
427 size / PAGE_SIZE;
428 } else {
429 node_set_online(numa_domain);
430
431 init_node_data[numa_domain].node_start_pfn =
432 start / PAGE_SIZE;
433 init_node_data[numa_domain].node_end_pfn =
434 init_node_data[numa_domain].node_start_pfn +
435 size / PAGE_SIZE;
436 init_node_data[numa_domain].node_present_pages =
437 size / PAGE_SIZE;
438 }
439
440 for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
441 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
442 numa_domain;
443
444 if (--ranges)
445 goto new_range;
446 }
447
448 for (i = 0; i <= max_domain; i++)
449 node_set_online(i);
450
451 return 0;
452}
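
The lookup table filled in above gives a constant-time physical-address-to-node
mapping; a minimal sketch, assuming the real pa_to_nid() (defined elsewhere in
the tree) boils down to the same indexing:

    static inline int pa_to_nid_sketch(unsigned long pa)
    {
        return numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
    }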
453
454static void __init setup_nonnuma(void)
455{
456 unsigned long top_of_ram = lmb_end_of_DRAM();
457 unsigned long total_ram = lmb_phys_mem_size();
458 unsigned long i;
459
460 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
461 top_of_ram, total_ram);
462 printk(KERN_INFO "Memory hole size: %ldMB\n",
463 (top_of_ram - total_ram) >> 20);
464
465 if (!numa_memory_lookup_table) {
466 long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
467 numa_memory_lookup_table =
468 (char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
469 memset(numa_memory_lookup_table, 0, entries * sizeof(char));
470 for (i = 0; i < entries ; i++)
471 numa_memory_lookup_table[i] = ARRAY_INITIALISER;
472 }
473
474 map_cpu_to_node(boot_cpuid, 0);
475
476 node_set_online(0);
477
478 init_node_data[0].node_start_pfn = 0;
479 init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
480 init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;
481
482 for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
483 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
484}
485
486static void __init dump_numa_topology(void)
487{
488 unsigned int node;
489 unsigned int count;
490
491 if (min_common_depth == -1 || !numa_enabled)
492 return;
493
494 for_each_online_node(node) {
495 unsigned long i;
496
497 printk(KERN_INFO "Node %d Memory:", node);
498
499 count = 0;
500
501 for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
502 if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
503 if (count == 0)
504 printk(" 0x%lx", i);
505 ++count;
506 } else {
507 if (count > 0)
508 printk("-0x%lx", i);
509 count = 0;
510 }
511 }
512
513 if (count > 0)
514 printk("-0x%lx", i);
515 printk("\n");
516 }
517 return;
518}
519
520/*
521 * Allocate some memory, using either the lmb or the bootmem allocator as
522 * required. nid is the preferred node and end is the physical address of
523 * the highest address in the node.
524 *
525 * Returns the physical address of the memory.
526 */
527static unsigned long careful_allocation(int nid, unsigned long size,
528 unsigned long align, unsigned long end)
529{
530 unsigned long ret = lmb_alloc_base(size, align, end);
531
532 /* retry over all memory */
533 if (!ret)
534 ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
535
536 if (!ret)
537 panic("numa.c: cannot allocate %lu bytes on node %d",
538 size, nid);
539
540 /*
541 * If the memory came from a previously allocated node, we must
542 * retry with the bootmem allocator.
543 */
544 if (pa_to_nid(ret) < nid) {
545 nid = pa_to_nid(ret);
546 ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
547 size, align, 0);
548
549 if (!ret)
550 panic("numa.c: cannot allocate %lu bytes on node %d",
551 size, nid);
552
553 ret = virt_to_abs(ret);
554
555 dbg("alloc_bootmem %lx %lx\n", ret, size);
556 }
557
558 return ret;
559}
560
561void __init do_init_bootmem(void)
562{
563 int nid;
564 int addr_cells, size_cells;
565 struct device_node *memory = NULL;
566 static struct notifier_block ppc64_numa_nb = {
567 .notifier_call = cpu_numa_callback,
568 .priority = 1 /* Must run before sched domains notifier. */
569 };
570
571 min_low_pfn = 0;
572 max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
573 max_pfn = max_low_pfn;
574
575 if (parse_numa_properties())
576 setup_nonnuma();
577 else
578 dump_numa_topology();
579
580 register_cpu_notifier(&ppc64_numa_nb);
581
582 for_each_online_node(nid) {
583 unsigned long start_paddr, end_paddr;
584 int i;
585 unsigned long bootmem_paddr;
586 unsigned long bootmap_pages;
587
588 start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
589 end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;
590
591 /* Allocate the node structure node local if possible */
592 NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
593 sizeof(struct pglist_data),
594 SMP_CACHE_BYTES, end_paddr);
595 NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
596 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
597
598 dbg("node %d\n", nid);
599 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
600
601 NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
602 NODE_DATA(nid)->node_start_pfn =
603 init_node_data[nid].node_start_pfn;
604 NODE_DATA(nid)->node_spanned_pages =
605 end_paddr - start_paddr;
606
607 if (NODE_DATA(nid)->node_spanned_pages == 0)
608 continue;
609
610 dbg("start_paddr = %lx\n", start_paddr);
611 dbg("end_paddr = %lx\n", end_paddr);
612
613 bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);
614
615 bootmem_paddr = careful_allocation(nid,
616 bootmap_pages << PAGE_SHIFT,
617 PAGE_SIZE, end_paddr);
618 memset(abs_to_virt(bootmem_paddr), 0,
619 bootmap_pages << PAGE_SHIFT);
620 dbg("bootmap_paddr = %lx\n", bootmem_paddr);
621
622 init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
623 start_paddr >> PAGE_SHIFT,
624 end_paddr >> PAGE_SHIFT);
625
626 /*
627 * We need to do another scan of all memory sections to
628 * associate memory with the correct node.
629 */
630 addr_cells = get_mem_addr_cells();
631 size_cells = get_mem_size_cells();
632 memory = NULL;
633 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
634 unsigned long mem_start, mem_size;
635 int numa_domain, ranges;
636 unsigned int *memcell_buf;
637 unsigned int len;
638
639 memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
640 if (!memcell_buf || len <= 0)
641 continue;
642
643 ranges = memory->n_addrs; /* ranges in cell */
644new_range:
645 mem_start = read_n_cells(addr_cells, &memcell_buf);
646 mem_size = read_n_cells(size_cells, &memcell_buf);
647 if (numa_enabled) {
648 numa_domain = of_node_numa_domain(memory);
649 if (numa_domain >= MAX_NUMNODES)
650 numa_domain = 0;
651 } else
652 numa_domain = 0;
653
654 if (numa_domain != nid)
655 continue;
656
657 mem_size = numa_enforce_memory_limit(mem_start, mem_size);
658 if (mem_size) {
659 dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
660 free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
661 }
662
663 if (--ranges) /* process all ranges in cell */
664 goto new_range;
665 }
666
667 /*
668 * Mark reserved regions on this node
669 */
670 for (i = 0; i < lmb.reserved.cnt; i++) {
671 unsigned long physbase = lmb.reserved.region[i].base;
672 unsigned long size = lmb.reserved.region[i].size;
673
674 if (pa_to_nid(physbase) != nid &&
675 pa_to_nid(physbase+size-1) != nid)
676 continue;
677
678 if (physbase < end_paddr &&
679 (physbase+size) > start_paddr) {
680 /* overlaps */
681 if (physbase < start_paddr) {
682 size -= start_paddr - physbase;
683 physbase = start_paddr;
684 }
685
686 if (size > end_paddr - physbase)
687 size = end_paddr - physbase;
688
689 dbg("reserve_bootmem %lx %lx\n", physbase,
690 size);
691 reserve_bootmem_node(NODE_DATA(nid), physbase,
692 size);
693 }
694 }
695 /*
696 * This loop may look familiar, but we have to do it again
697 * after marking our reserved memory to mark memory present
698 * for sparsemem.
699 */
700 addr_cells = get_mem_addr_cells();
701 size_cells = get_mem_size_cells();
702 memory = NULL;
703 while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
704 unsigned long mem_start, mem_size;
705 int numa_domain, ranges;
706 unsigned int *memcell_buf;
707 unsigned int len;
708
709 memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
710 if (!memcell_buf || len <= 0)
711 continue;
712
713 ranges = memory->n_addrs; /* ranges in cell */
714new_range2:
715 mem_start = read_n_cells(addr_cells, &memcell_buf);
716 mem_size = read_n_cells(size_cells, &memcell_buf);
717 if (numa_enabled) {
718 numa_domain = of_node_numa_domain(memory);
719 if (numa_domain >= MAX_NUMNODES)
720 numa_domain = 0;
721 } else
722 numa_domain = 0;
723
724 if (numa_domain != nid)
725 continue;
726
727 mem_size = numa_enforce_memory_limit(mem_start, mem_size);
728 memory_present(numa_domain, mem_start >> PAGE_SHIFT,
729 (mem_start + mem_size) >> PAGE_SHIFT);
730
731 if (--ranges) /* process all ranges in cell */
732 goto new_range2;
733 }
734
735 }
736}
737
738void __init paging_init(void)
739{
740 unsigned long zones_size[MAX_NR_ZONES];
741 unsigned long zholes_size[MAX_NR_ZONES];
742 int nid;
743
744 memset(zones_size, 0, sizeof(zones_size));
745 memset(zholes_size, 0, sizeof(zholes_size));
746
747 for_each_online_node(nid) {
748 unsigned long start_pfn;
749 unsigned long end_pfn;
750
751 start_pfn = init_node_data[nid].node_start_pfn;
752 end_pfn = init_node_data[nid].node_end_pfn;
753
754 zones_size[ZONE_DMA] = end_pfn - start_pfn;
755 zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
756 init_node_data[nid].node_present_pages;
757
758 dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
759 zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);
760
761 free_area_init_node(nid, NODE_DATA(nid), zones_size,
762 start_pfn, zholes_size);
763 }
764}
765
766static int __init early_numa(char *p)
767{
768 if (!p)
769 return 0;
770
771 if (strstr(p, "off"))
772 numa_enabled = 0;
773
774 if (strstr(p, "debug"))
775 numa_debug = 1;
776
777 return 0;
778}
779early_param("numa", early_numa);
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
deleted file mode 100644
index 0473953f6a37..000000000000
--- a/arch/ppc64/mm/slb.c
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * PowerPC64 SLB support.
3 *
4 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
5 * Based on earlier code written by:
6 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
7 * Copyright (c) 2001 Dave Engebretsen
8 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
9 *
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/pgtable.h>
19#include <asm/mmu.h>
20#include <asm/mmu_context.h>
21#include <asm/paca.h>
22#include <asm/cputable.h>
23
24extern void slb_allocate(unsigned long ea);
25
26static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
27{
28 return (ea & ESID_MASK) | SLB_ESID_V | slot;
29}
30
31static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
32{
33 return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
34}
35
36static inline void create_slbe(unsigned long ea, unsigned long flags,
37 unsigned long entry)
38{
39 asm volatile("slbmte %0,%1" :
40 : "r" (mk_vsid_data(ea, flags)),
41 "r" (mk_esid_data(ea, entry))
42 : "memory" );
43}
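
As slb_initialize() below shows, bolting the first kernel segment then reduces
to a single call; a sketch assuming a machine without 16M pages:

    /* RS = (VSID << SLB_VSID_SHIFT) | flags, RB = ESID | SLB_ESID_V | slot */
    create_slbe(KERNELBASE, SLB_VSID_KERNEL, 0);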
44
45static void slb_flush_and_rebolt(void)
46{
47 /* If you change this make sure you change SLB_NUM_BOLTED
48 * appropriately too. */
49 unsigned long ksp_flags = SLB_VSID_KERNEL;
50 unsigned long ksp_esid_data;
51
52 WARN_ON(!irqs_disabled());
53
54 if (cpu_has_feature(CPU_FTR_16M_PAGE))
55 ksp_flags |= SLB_VSID_L;
56
57 ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
58 if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
59 ksp_esid_data &= ~SLB_ESID_V;
60
61 /* We need to do this all in asm, so we're sure we don't touch
62 * the stack between the slbia and rebolting it. */
63 asm volatile("isync\n"
64 "slbia\n"
65 /* Slot 1 - first VMALLOC segment */
66 "slbmte %0,%1\n"
67 /* Slot 2 - kernel stack */
68 "slbmte %2,%3\n"
69 "isync"
70 :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
71 "r"(mk_esid_data(VMALLOCBASE, 1)),
72 "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
73 "r"(ksp_esid_data)
74 : "memory");
75}
76
77/* Flush all user entries from the segment table of the current processor. */
78void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
79{
80 unsigned long offset = get_paca()->slb_cache_ptr;
81 unsigned long esid_data = 0;
82 unsigned long pc = KSTK_EIP(tsk);
83 unsigned long stack = KSTK_ESP(tsk);
84 unsigned long unmapped_base;
85
86 if (offset <= SLB_CACHE_ENTRIES) {
87 int i;
88 asm volatile("isync" : : : "memory");
89 for (i = 0; i < offset; i++) {
90 esid_data = ((unsigned long)get_paca()->slb_cache[i]
91 << SID_SHIFT) | SLBIE_C;
92 asm volatile("slbie %0" : : "r" (esid_data));
93 }
94 asm volatile("isync" : : : "memory");
95 } else {
96 slb_flush_and_rebolt();
97 }
98
99 /* Workaround POWER5 < DD2.1 issue */
100 if (offset == 1 || offset > SLB_CACHE_ENTRIES)
101 asm volatile("slbie %0" : : "r" (esid_data));
102
103 get_paca()->slb_cache_ptr = 0;
104 get_paca()->context = mm->context;
105
106 /*
107 * preload some userspace segments into the SLB.
108 */
109 if (test_tsk_thread_flag(tsk, TIF_32BIT))
110 unmapped_base = TASK_UNMAPPED_BASE_USER32;
111 else
112 unmapped_base = TASK_UNMAPPED_BASE_USER64;
113
114 if (pc >= KERNELBASE)
115 return;
116 slb_allocate(pc);
117
118 if (GET_ESID(pc) == GET_ESID(stack))
119 return;
120
121 if (stack >= KERNELBASE)
122 return;
123 slb_allocate(stack);
124
125 if ((GET_ESID(pc) == GET_ESID(unmapped_base))
126 || (GET_ESID(stack) == GET_ESID(unmapped_base)))
127 return;
128
129 if (unmapped_base >= KERNELBASE)
130 return;
131 slb_allocate(unmapped_base);
132}
133
134void slb_initialize(void)
135{
136 /* On iSeries the bolted entries have already been set up by
137 * the hypervisor from the lparMap data in head.S */
138#ifndef CONFIG_PPC_ISERIES
139 unsigned long flags = SLB_VSID_KERNEL;
140
141 /* Invalidate the entire SLB (even slot 0) & all the ERATS */
142 if (cpu_has_feature(CPU_FTR_16M_PAGE))
143 flags |= SLB_VSID_L;
144
145 asm volatile("isync":::"memory");
146 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
147 asm volatile("isync; slbia; isync":::"memory");
148 create_slbe(KERNELBASE, flags, 0);
149 create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
150 /* We don't bolt the stack for the time being - we're in boot,
151 * so the stack is in the bolted segment. By the time it goes
152 * elsewhere, we'll call _switch() which will bolt in the new
153 * one. */
154 asm volatile("isync":::"memory");
155#endif
156
157 get_paca()->stab_rr = SLB_NUM_BOLTED;
158}
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
deleted file mode 100644
index a3a03da503bc..000000000000
--- a/arch/ppc64/mm/slb_low.S
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * arch/ppc64/mm/slb_low.S
3 *
4 * Low-level SLB routines
5 *
6 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
7 *
8 * Based on earlier C version:
9 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
10 * Copyright (c) 2001 Dave Engebretsen
11 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/config.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22#include <asm/mmu.h>
23#include <asm/ppc_asm.h>
24#include <asm/asm-offsets.h>
25#include <asm/cputable.h>
26
27/* void slb_allocate(unsigned long ea);
28 *
29 * Create an SLB entry for the given EA (user or kernel).
30 * r3 = faulting address, r13 = PACA
31 * r9, r10, r11 are clobbered by this function
32 * No other registers are examined or changed.
33 */
34_GLOBAL(slb_allocate)
35 /*
36 * First find a slot, round robin. Previously we tried to find
37 * a free slot first but that took too long. Unfortunately we
38 * don't have any LRU information to help us choose a slot.
39 */
40#ifdef CONFIG_PPC_ISERIES
41 /*
42 * On iSeries, the "bolted" stack segment can be cast out on
43 * shared processor switch so we need to check for a miss on
44 * it and restore it to the right slot.
45 */
46 ld r9,PACAKSAVE(r13)
47 clrrdi r9,r9,28
48 clrrdi r11,r3,28
49 li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
50 cmpld r9,r11
51 beq 3f
52#endif /* CONFIG_PPC_ISERIES */
53
54 ld r10,PACASTABRR(r13)
55 addi r10,r10,1
56 /* use a cpu feature mask if we ever change our slb size */
57 cmpldi r10,SLB_NUM_ENTRIES
58
59 blt+ 4f
60 li r10,SLB_NUM_BOLTED
61
624:
63 std r10,PACASTABRR(r13)
643:
65 /* r3 = faulting address, r10 = entry */
66
67 srdi r9,r3,60 /* get region */
68 srdi r3,r3,28 /* get esid */
69 cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
70
71 rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
72 oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
73
74 /* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
75
76 blt cr7,0f /* user or kernel? */
77
78 /* kernel address: proto-VSID = ESID */
79 /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
80 * this code will generate the protoVSID 0xfffffffff for the
81 * top segment. That's ok, the scramble below will translate
82 * it to VSID 0, which is reserved as a bad VSID - one which
83 * will never have any pages in it. */
84 li r11,SLB_VSID_KERNEL
85BEGIN_FTR_SECTION
86 bne cr7,9f
87 li r11,(SLB_VSID_KERNEL|SLB_VSID_L)
88END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
89 b 9f
90
910: /* user address: proto-VSID = context<<15 | ESID */
92 srdi. r9,r3,USER_ESID_BITS
93 bne- 8f /* invalid ea bits set */
94
95#ifdef CONFIG_HUGETLB_PAGE
96BEGIN_FTR_SECTION
97 lhz r9,PACAHIGHHTLBAREAS(r13)
98 srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
99 srd r9,r9,r11
100 lhz r11,PACALOWHTLBAREAS(r13)
101 srd r11,r11,r3
102 or r9,r9,r11
103END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
104#endif /* CONFIG_HUGETLB_PAGE */
105
106 li r11,SLB_VSID_USER
107
108#ifdef CONFIG_HUGETLB_PAGE
109BEGIN_FTR_SECTION
110 rldimi r11,r9,8,55 /* shift masked bit into SLB_VSID_L */
111END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
112#endif /* CONFIG_HUGETLB_PAGE */
113
114 ld r9,PACACONTEXTID(r13)
115 rldimi r3,r9,USER_ESID_BITS,0
116
1179: /* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
118 ASM_VSID_SCRAMBLE(r3,r9)
119
120 rldimi r11,r3,SLB_VSID_SHIFT,16 /* combine VSID and flags */
121
122 /*
123 * No need for an isync before or after this slbmte. The exception
124 * we enter with and the rfid we exit with are context synchronizing.
125 */
126 slbmte r11,r10
127
128 bgelr cr7 /* we're done for kernel addresses */
129
130 /* Update the slb cache */
131 lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
132 cmpldi r3,SLB_CACHE_ENTRIES
133 bge 1f
134
135 /* still room in the slb cache */
136 sldi r11,r3,1 /* r11 = offset * sizeof(u16) */
137 rldicl r10,r10,36,28 /* get low 16 bits of the ESID */
138 add r11,r11,r13 /* r11 = (u16 *)paca + offset */
139 sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
140 addi r3,r3,1 /* offset++ */
141 b 2f
1421: /* offset >= SLB_CACHE_ENTRIES */
143 li r3,SLB_CACHE_ENTRIES+1
1442:
145 sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
146 blr
147
1488: /* invalid EA */
149 li r3,0 /* BAD_VSID */
150 li r11,SLB_VSID_USER /* flags don't much matter */
151 b 9b
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
deleted file mode 100644
index 1b83f002bf27..000000000000
--- a/arch/ppc64/mm/stab.c
+++ /dev/null
@@ -1,279 +0,0 @@
1/*
2 * PowerPC64 Segment Translation Support.
3 *
4 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
5 * Copyright (c) 2001 Dave Engebretsen
6 *
7 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <asm/pgtable.h>
17#include <asm/mmu.h>
18#include <asm/mmu_context.h>
19#include <asm/paca.h>
20#include <asm/cputable.h>
21#include <asm/lmb.h>
22#include <asm/abs_addr.h>
23
24struct stab_entry {
25 unsigned long esid_data;
26 unsigned long vsid_data;
27};
28
29/* Both the segment table and SLB code use the following cache */
30#define NR_STAB_CACHE_ENTRIES 8
31DEFINE_PER_CPU(long, stab_cache_ptr);
32DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
33
34/*
35 * Create a segment table entry for the given esid/vsid pair.
36 */
37static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
38{
39 unsigned long esid_data, vsid_data;
40 unsigned long entry, group, old_esid, castout_entry, i;
41 unsigned int global_entry;
42 struct stab_entry *ste, *castout_ste;
43 unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
44
45 vsid_data = vsid << STE_VSID_SHIFT;
46 esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
47 if (! kernel_segment)
48 esid_data |= STE_ESID_KS;
49
50 /* Search the primary group first. */
51 global_entry = (esid & 0x1f) << 3;
52 ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
53
54 /* Find an empty entry, if one exists. */
55 for (group = 0; group < 2; group++) {
56 for (entry = 0; entry < 8; entry++, ste++) {
57 if (!(ste->esid_data & STE_ESID_V)) {
58 ste->vsid_data = vsid_data;
59 asm volatile("eieio":::"memory");
60 ste->esid_data = esid_data;
61 return (global_entry | entry);
62 }
63 }
64 /* Now search the secondary group. */
65 global_entry = ((~esid) & 0x1f) << 3;
66 ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
67 }
68
69 /*
70 * Could not find empty entry, pick one with a round robin selection.
71 * Search all entries in the two groups.
72 */
73 castout_entry = get_paca()->stab_rr;
74 for (i = 0; i < 16; i++) {
75 if (castout_entry < 8) {
76 global_entry = (esid & 0x1f) << 3;
77 ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
78 castout_ste = ste + castout_entry;
79 } else {
80 global_entry = ((~esid) & 0x1f) << 3;
81 ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
82 castout_ste = ste + (castout_entry - 8);
83 }
84
85 /* Don't cast out the first kernel segment */
86 if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
87 break;
88
89 castout_entry = (castout_entry + 1) & 0xf;
90 }
91
92 get_paca()->stab_rr = (castout_entry + 1) & 0xf;
93
94 /* Modify the old entry to the new value. */
95
96 /* Force previous translations to complete. DRENG */
97 asm volatile("isync" : : : "memory");
98
99 old_esid = castout_ste->esid_data >> SID_SHIFT;
100 castout_ste->esid_data = 0; /* Invalidate old entry */
101
102 asm volatile("sync" : : : "memory"); /* Order update */
103
104 castout_ste->vsid_data = vsid_data;
105 asm volatile("eieio" : : : "memory"); /* Order update */
106 castout_ste->esid_data = esid_data;
107
108 asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
109 /* Ensure completion of slbie */
110 asm volatile("sync" : : : "memory");
111
112 return (global_entry | (castout_entry & 0x7));
113}
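
To make the group arithmetic concrete, take an invented esid of 0x123:

    /*
     * esid & 0x1f  == 0x03: primary group at   stab | (0x03 << 7)
     * ~esid & 0x1f == 0x1c: secondary group at stab | (0x1c << 7)
     *
     * A group is 8 stab_entry slots of 16 bytes each, hence the << 7;
     * global_entry counts slots, 8 per group, hence the << 3.
     */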
114
115/*
116 * Allocate a segment table entry for the given ea and mm
117 */
118static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
119{
120 unsigned long vsid;
121 unsigned char stab_entry;
122 unsigned long offset;
123
124 /* Kernel or user address? */
125 if (ea >= KERNELBASE) {
126 vsid = get_kernel_vsid(ea);
127 } else {
128 if ((ea >= TASK_SIZE_USER64) || (! mm))
129 return 1;
130
131 vsid = get_vsid(mm->context.id, ea);
132 }
133
134 stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
135
136 if (ea < KERNELBASE) {
137 offset = __get_cpu_var(stab_cache_ptr);
138 if (offset < NR_STAB_CACHE_ENTRIES)
139 __get_cpu_var(stab_cache[offset++]) = stab_entry;
140 else
141 offset = NR_STAB_CACHE_ENTRIES+1;
142 __get_cpu_var(stab_cache_ptr) = offset;
143
144 /* Order update */
145 asm volatile("sync":::"memory");
146 }
147
148 return 0;
149}
150
151int ste_allocate(unsigned long ea)
152{
153 return __ste_allocate(ea, current->mm);
154}
155
156/*
157 * Do the segment table work for a context switch: flush all user
158 * entries from the table, then preload some probably useful entries
159 * for the new task
160 */
161void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
162{
163 struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
164 struct stab_entry *ste;
165 unsigned long offset = __get_cpu_var(stab_cache_ptr);
166 unsigned long pc = KSTK_EIP(tsk);
167 unsigned long stack = KSTK_ESP(tsk);
168 unsigned long unmapped_base;
169
170 /* Force previous translations to complete. DRENG */
171 asm volatile("isync" : : : "memory");
172
173 if (offset <= NR_STAB_CACHE_ENTRIES) {
174 int i;
175
176 for (i = 0; i < offset; i++) {
177 ste = stab + __get_cpu_var(stab_cache[i]);
178 ste->esid_data = 0; /* invalidate entry */
179 }
180 } else {
181 unsigned long entry;
182
183 /* Invalidate all entries. */
184 ste = stab;
185
186 /* Never flush the first entry. */
187 ste += 1;
188 for (entry = 1;
189 entry < (PAGE_SIZE / sizeof(struct stab_entry));
190 entry++, ste++) {
191 unsigned long ea;
192 ea = ste->esid_data & ESID_MASK;
193 if (ea < KERNELBASE) {
194 ste->esid_data = 0;
195 }
196 }
197 }
198
199 asm volatile("sync; slbia; sync":::"memory");
200
201 __get_cpu_var(stab_cache_ptr) = 0;
202
203 /* Now preload some entries for the new task */
204 if (test_tsk_thread_flag(tsk, TIF_32BIT))
205 unmapped_base = TASK_UNMAPPED_BASE_USER32;
206 else
207 unmapped_base = TASK_UNMAPPED_BASE_USER64;
208
209 __ste_allocate(pc, mm);
210
211 if (GET_ESID(pc) == GET_ESID(stack))
212 return;
213
214 __ste_allocate(stack, mm);
215
216 if ((GET_ESID(pc) == GET_ESID(unmapped_base))
217 || (GET_ESID(stack) == GET_ESID(unmapped_base)))
218 return;
219
220 __ste_allocate(unmapped_base, mm);
221
222 /* Order update */
223 asm volatile("sync" : : : "memory");
224}
225
226extern void slb_initialize(void);
227
228/*
229 * Allocate segment tables for secondary CPUs. These must all go in
230 * the first (bolted) segment, so that do_stab_bolted won't get a
231 * recursive segment miss on the segment table itself.
232 */
233void stabs_alloc(void)
234{
235 int cpu;
236
237 if (cpu_has_feature(CPU_FTR_SLB))
238 return;
239
240 for_each_cpu(cpu) {
241 unsigned long newstab;
242
243 if (cpu == 0)
244 continue; /* stab for CPU 0 is statically allocated */
245
246 newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
247 if (! newstab)
248 panic("Unable to allocate segment table for CPU %d.\n",
249 cpu);
250
251 newstab += KERNELBASE;
252
253 memset((void *)newstab, 0, PAGE_SIZE);
254
255 paca[cpu].stab_addr = newstab;
256 paca[cpu].stab_real = virt_to_abs(newstab);
257 printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
258 }
259}
260
261/*
262 * Build an entry for the base kernel segment and put it into
263 * the segment table or SLB. All other segment table or SLB
264 * entries are faulted in.
265 */
266void stab_initialize(unsigned long stab)
267{
268 unsigned long vsid = get_kernel_vsid(KERNELBASE);
269
270 if (cpu_has_feature(CPU_FTR_SLB)) {
271 slb_initialize();
272 } else {
273 asm volatile("isync; slbia; isync":::"memory");
274 make_ste(stab, GET_ESID(KERNELBASE), vsid);
275
276 /* Order update */
277 asm volatile("sync":::"memory");
278 }
279}
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
deleted file mode 100644
index 21fbffb23a43..000000000000
--- a/arch/ppc64/mm/tlb.c
+++ /dev/null
@@ -1,197 +0,0 @@
1/*
2 * This file contains the routines for flushing entries from the
3 * TLB and MMU hash table.
4 *
5 * Derived from arch/ppc64/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * Dave Engebretsen <engebret@us.ibm.com>
17 * Rework for PPC64 port.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24#include <linux/config.h>
25#include <linux/kernel.h>
26#include <linux/mm.h>
27#include <linux/init.h>
28#include <linux/percpu.h>
29#include <linux/hardirq.h>
30#include <asm/pgalloc.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33#include <linux/highmem.h>
34
35DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
36
37/* This is declared as we are using the more or less generic
38 * include/asm-ppc64/tlb.h file -- tgall
39 */
40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
43
44struct pte_freelist_batch
45{
46 struct rcu_head rcu;
47 unsigned int index;
48 pgtable_free_t tables[0];
49};
50
51DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
52unsigned long pte_freelist_forced_free;
53
54#define PTE_FREELIST_SIZE \
55 ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
56 / sizeof(pgtable_free_t))
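
With 4K pages, and assuming sizeof(struct pte_freelist_batch) is 24 bytes and
sizeof(pgtable_free_t) is 8 (plausible but configuration-dependent), this
works out to (4096 - 24) / 8 = 509 page tables per RCU batch.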
57
58#ifdef CONFIG_SMP
59static void pte_free_smp_sync(void *arg)
60{
61 /* Do nothing, just ensure we sync with all CPUs */
62}
63#endif
64
65/* This is only called when we are critically out of memory
66 * (and fail to get a page in pte_free_tlb).
67 */
68static void pgtable_free_now(pgtable_free_t pgf)
69{
70 pte_freelist_forced_free++;
71
72 smp_call_function(pte_free_smp_sync, NULL, 0, 1);
73
74 pgtable_free(pgf);
75}
76
77static void pte_free_rcu_callback(struct rcu_head *head)
78{
79 struct pte_freelist_batch *batch =
80 container_of(head, struct pte_freelist_batch, rcu);
81 unsigned int i;
82
83 for (i = 0; i < batch->index; i++)
84 pgtable_free(batch->tables[i]);
85
86 free_page((unsigned long)batch);
87}
88
89static void pte_free_submit(struct pte_freelist_batch *batch)
90{
91 INIT_RCU_HEAD(&batch->rcu);
92 call_rcu(&batch->rcu, pte_free_rcu_callback);
93}
94
95void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
96{
97 /* This is safe as we are holding page_table_lock */
98 cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
99 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
100
101 if (atomic_read(&tlb->mm->mm_users) < 2 ||
102 cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
103 pgtable_free(pgf);
104 return;
105 }
106
107 if (*batchp == NULL) {
108 *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
109 if (*batchp == NULL) {
110 pgtable_free_now(pgf);
111 return;
112 }
113 (*batchp)->index = 0;
114 }
115 (*batchp)->tables[(*batchp)->index++] = pgf;
116 if ((*batchp)->index == PTE_FREELIST_SIZE) {
117 pte_free_submit(*batchp);
118 *batchp = NULL;
119 }
120}
121
122/*
123 * Update the MMU hash table to correspond with a change to
124 * a Linux PTE. If wrprot is true, it is permissible to
125 * change the existing HPTE to read-only rather than removing it
126 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
127 */
128void hpte_update(struct mm_struct *mm, unsigned long addr,
129 unsigned long pte, int wrprot)
130{
131 int i;
132 unsigned long context = 0;
133 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
134
135 if (REGION_ID(addr) == USER_REGION_ID)
136 context = mm->context.id;
137 i = batch->index;
138
139 /*
140 * This can happen when we are in the middle of a TLB batch and
141 * we encounter memory pressure (eg copy_page_range when it tries
142 * to allocate a new pte). If we have to reclaim memory and end
143 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream.
145 */
146 if (i != 0 && (context != batch->context ||
147 batch->large != pte_huge(pte))) {
148 flush_tlb_pending();
149 i = 0;
150 }
151
152 if (i == 0) {
153 batch->context = context;
154 batch->mm = mm;
155 batch->large = pte_huge(pte);
156 }
157 batch->pte[i] = __pte(pte);
158 batch->addr[i] = addr;
159 batch->index = ++i;
160 if (i >= PPC64_TLB_BATCH_NR)
161 flush_tlb_pending();
162}
163
164void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
165{
166 int i;
167 int cpu;
168 cpumask_t tmp;
169 int local = 0;
170
171 BUG_ON(in_interrupt());
172
173 cpu = get_cpu();
174 i = batch->index;
175 tmp = cpumask_of_cpu(cpu);
176 if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
177 local = 1;
178
179 if (i == 1)
180 flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
181 local);
182 else
183 flush_hash_range(batch->context, i, local);
184 batch->index = 0;
185 put_cpu();
186}
187
188void pte_free_finish(void)
189{
190 /* This is safe as we are holding page_table_lock */
191 struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
192
193 if (*batchp == NULL)
194 return;
195 pte_free_submit(*batchp);
196 *batchp = NULL;
197}
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/ppc64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, including the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644
index 162dbf06c142..000000000000
--- a/arch/ppc64/oprofile/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
4 oprof.o cpu_buffer.o buffer_sync.o \
5 event_buffer.o oprofile_files.o \
6 oprofilefs.o oprofile_stats.o \
7 timer_int.o )
8
9oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
diff --git a/arch/ppc64/oprofile/common.c b/arch/ppc64/oprofile/common.c
deleted file mode 100644
index e5f572710aa0..000000000000
--- a/arch/ppc64/oprofile/common.c
+++ /dev/null
@@ -1,145 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * Based on alpha version.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/oprofile.h>
13#include <linux/init.h>
14#include <linux/smp.h>
15#include <linux/errno.h>
16#include <asm/ptrace.h>
17#include <asm/system.h>
18#include <asm/pmc.h>
19#include <asm/cputable.h>
20#include <asm/oprofile_impl.h>
21
22static struct op_ppc64_model *model;
23
24static struct op_counter_config ctr[OP_MAX_COUNTER];
25static struct op_system_config sys;
26
27static void op_handle_interrupt(struct pt_regs *regs)
28{
29 model->handle_interrupt(regs, ctr);
30}
31
32static int op_ppc64_setup(void)
33{
34 int err;
35
36 /* Grab the hardware */
37 err = reserve_pmc_hardware(op_handle_interrupt);
38 if (err)
39 return err;
40
41 /* Pre-compute the values to stuff in the hardware registers. */
42 model->reg_setup(ctr, &sys, model->num_counters);
43
44 /* Configure the registers on all cpus. */
45 on_each_cpu(model->cpu_setup, NULL, 0, 1);
46
47 return 0;
48}
49
50static void op_ppc64_shutdown(void)
51{
52 release_pmc_hardware();
53}
54
55static void op_ppc64_cpu_start(void *dummy)
56{
57 model->start(ctr);
58}
59
60static int op_ppc64_start(void)
61{
62 on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1);
63 return 0;
64}
65
66static inline void op_ppc64_cpu_stop(void *dummy)
67{
68 model->stop();
69}
70
71static void op_ppc64_stop(void)
72{
73 on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1);
74}
75
76static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
77{
78 int i;
79
80 /*
81 * There is one mmcr0, mmcr1 and mmcra for setting the events for
82 * all of the counters.
83 */
84 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
85 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
86 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
87
88 for (i = 0; i < model->num_counters; ++i) {
89 struct dentry *dir;
90 char buf[3];
91
92 snprintf(buf, sizeof buf, "%d", i);
93 dir = oprofilefs_mkdir(sb, root, buf);
94
95 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
96 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
97 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
98 /*
99 * We don't support per-counter user/kernel selection, but
100 * we leave the entries because userspace expects them
101 */
102 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
103 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
104 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
105 }
106
107 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
108 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
109 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
110 &sys.backtrace_spinlocks);
111
112 /* Default to tracing both kernel and user */
113 sys.enable_kernel = 1;
114 sys.enable_user = 1;
115
116 /* Turn on backtracing through spinlocks by default */
117 sys.backtrace_spinlocks = 1;
118
119 return 0;
120}
121
122int __init oprofile_arch_init(struct oprofile_operations *ops)
123{
124 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
125 return -ENODEV;
126
127 model = cur_cpu_spec->oprofile_model;
128 model->num_counters = cur_cpu_spec->num_pmcs;
129
130 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
131 ops->create_files = op_ppc64_create_files;
132 ops->setup = op_ppc64_setup;
133 ops->shutdown = op_ppc64_shutdown;
134 ops->start = op_ppc64_start;
135 ops->stop = op_ppc64_stop;
136
137 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
138 ops->cpu_type);
139
140 return 0;
141}
142
143void oprofile_arch_exit(void)
144{
145}
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/ppc64/oprofile/op_model_power4.c
deleted file mode 100644
index 32b2bb5625fe..000000000000
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/oprofile.h>
11#include <linux/init.h>
12#include <linux/smp.h>
13#include <asm/ptrace.h>
14#include <asm/system.h>
15#include <asm/processor.h>
16#include <asm/cputable.h>
17#include <asm/systemcfg.h>
18#include <asm/rtas.h>
19#include <asm/oprofile_impl.h>
20
21#define dbg(args...)
22
23static unsigned long reset_value[OP_MAX_COUNTER];
24
25static int oprofile_running;
26static int mmcra_has_sihv;
27
28/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
29static u32 mmcr0_val;
30static u64 mmcr1_val;
31static u32 mmcra_val;
32
33/*
34 * Since we do not have an NMI, backtracing through spinlocks is
35 * only a best guess. In light of this, allow it to be disabled at
36 * runtime.
37 */
38static int backtrace_spinlocks;
39
40static void power4_reg_setup(struct op_counter_config *ctr,
41 struct op_system_config *sys,
42 int num_ctrs)
43{
44 int i;
45
46 /*
47 * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
48 * However we disable it on all POWER4 until we verify it works
49 * (I was seeing some strange behaviour last time I tried).
50 *
51 * It has been verified to work on POWER5 so we enable it there.
52 */
53 if (cpu_has_feature(CPU_FTR_MMCRA_SIHV))
54 mmcra_has_sihv = 1;
55
56 /*
57 * The performance counter event settings are given in the mmcr0,
58 * mmcr1 and mmcra values passed from the user in the
59 * op_system_config structure (sys variable).
60 */
61 mmcr0_val = sys->mmcr0;
62 mmcr1_val = sys->mmcr1;
63 mmcra_val = sys->mmcra;
64
65 backtrace_spinlocks = sys->backtrace_spinlocks;
66
67 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
68 reset_value[i] = 0x80000000UL - ctr[i].count;
69
70 /* setup user and kernel profiling */
71 if (sys->enable_kernel)
72 mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
73 else
74 mmcr0_val |= MMCR0_KERNEL_DISABLE;
75
76 if (sys->enable_user)
77 mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
78 else
79 mmcr0_val |= MMCR0_PROBLEM_DISABLE;
80}
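
The reset value works because the PMCs are 32-bit counters that raise the
performance monitor exception when bit 31 becomes set; for an invented
ctr[i].count of 100000:

    /*
     * reset_value[i] = 0x80000000UL - 100000;
     * Preloading the PMC with this and counting upward sets bit 31 -- and
     * fires the exception -- after exactly 100000 counted events.
     */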
81
82extern void ppc64_enable_pmcs(void);
83
84static void power4_cpu_setup(void *unused)
85{
86 unsigned int mmcr0 = mmcr0_val;
87 unsigned long mmcra = mmcra_val;
88
89 ppc64_enable_pmcs();
90
91 /* set the freeze bit */
92 mmcr0 |= MMCR0_FC;
93 mtspr(SPRN_MMCR0, mmcr0);
94
95 mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
96 mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
97 mtspr(SPRN_MMCR0, mmcr0);
98
99 mtspr(SPRN_MMCR1, mmcr1_val);
100
101 mmcra |= MMCRA_SAMPLE_ENABLE;
102 mtspr(SPRN_MMCRA, mmcra);
103
104 dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
105 mfspr(SPRN_MMCR0));
106 dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
107 mfspr(SPRN_MMCR1));
108 dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
109 mfspr(SPRN_MMCRA));
110}
111
112static void power4_start(struct op_counter_config *ctr)
113{
114 int i;
115 unsigned int mmcr0;
116
117 /* set the PMM bit (see comment below) */
118 mtmsrd(mfmsr() | MSR_PMM);
119
120 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
121 if (ctr[i].enabled) {
122 ctr_write(i, reset_value[i]);
123 } else {
124 ctr_write(i, 0);
125 }
126 }
127
128 mmcr0 = mfspr(SPRN_MMCR0);
129
130 /*
131 * We must clear the PMAO bit on some (GQ) chips. Just do it
132 * all the time
133 */
134 mmcr0 &= ~MMCR0_PMAO;
135
136 /*
137 * now clear the freeze bit, counting will not start until we
139 * rfid from this exception, because only at that point will
139 * the PMM bit be cleared
140 */
141 mmcr0 &= ~MMCR0_FC;
142 mtspr(SPRN_MMCR0, mmcr0);
143
144 oprofile_running = 1;
145
146 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
147}
148
149static void power4_stop(void)
150{
151 unsigned int mmcr0;
152
153 /* freeze counters */
154 mmcr0 = mfspr(SPRN_MMCR0);
155 mmcr0 |= MMCR0_FC;
156 mtspr(SPRN_MMCR0, mmcr0);
157
158 oprofile_running = 0;
159
160 dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
161
162 mb();
163}
164
165/* Fake functions used by canonicalize_pc */
166static void __attribute_used__ hypervisor_bucket(void)
167{
168}
169
170static void __attribute_used__ rtas_bucket(void)
171{
172}
173
174static void __attribute_used__ kernel_unknown_bucket(void)
175{
176}
177
178static unsigned long check_spinlock_pc(struct pt_regs *regs,
179 unsigned long profile_pc)
180{
181 unsigned long pc = instruction_pointer(regs);
182
183 /*
184 * If both the SIAR (sampled instruction) and the perfmon exception
185 * occurred in a spinlock region then we account the sample to the
186 * calling function. This isn't 100% correct; we really need soft
187 * IRQ disable so we always get the perfmon exception at the
188 * point at which the SIAR is set.
189 */
190 if (backtrace_spinlocks && in_lock_functions(pc) &&
191 in_lock_functions(profile_pc))
192 return regs->link;
193 else
194 return profile_pc;
195}
196
197/*
198 * On GQ and newer the MMCRA stores the HV and PR bits at the time
199 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
200 * the hypervisor, our exception vectors or RTAS.
201 */
202static unsigned long get_pc(struct pt_regs *regs)
203{
204 unsigned long pc = mfspr(SPRN_SIAR);
205 unsigned long mmcra;
206
207 /* Can't do much about it */
208 if (!mmcra_has_sihv)
209 return check_spinlock_pc(regs, pc);
210
211 mmcra = mfspr(SPRN_MMCRA);
212
213 /* Were we in the hypervisor? */
214 if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
215 (mmcra & MMCRA_SIHV))
216 /* function descriptor madness */
217 return *((unsigned long *)hypervisor_bucket);
218
219 /* We were in userspace, nothing to do */
220 if (mmcra & MMCRA_SIPR)
221 return pc;
222
223#ifdef CONFIG_PPC_RTAS
224 /* Were we in RTAS? */
225 if (pc >= rtas.base && pc < (rtas.base + rtas.size))
226 /* function descriptor madness */
227 return *((unsigned long *)rtas_bucket);
228#endif
229
230 /* Were we in our exception vectors or SLB real mode miss handler? */
231 if (pc < 0x1000000UL)
232 return (unsigned long)__va(pc);
233
234 /* Not sure where we were */
235 if (pc < KERNELBASE)
236 /* function descriptor madness */
237 return *((unsigned long *)kernel_unknown_bucket);
238
239 return check_spinlock_pc(regs, pc);
240}
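
The "function descriptor madness" comments refer to the ppc64 ELF ABI, where
a function symbol names a three-doubleword descriptor rather than code; a
sketch of the layout (struct name invented):

    struct opd_sketch {
        unsigned long entry;    /* address of the function's code */
        unsigned long toc;      /* TOC base for the function */
        unsigned long env;      /* environment pointer, unused by C */
    };
    /* so *(unsigned long *)hypervisor_bucket yields its code address */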
241
242static int get_kernel(unsigned long pc)
243{
244 int is_kernel;
245
246 if (!mmcra_has_sihv) {
247 is_kernel = (pc >= KERNELBASE);
248 } else {
249 unsigned long mmcra = mfspr(SPRN_MMCRA);
250 is_kernel = ((mmcra & MMCRA_SIPR) == 0);
251 }
252
253 return is_kernel;
254}
255
256static void power4_handle_interrupt(struct pt_regs *regs,
257 struct op_counter_config *ctr)
258{
259 unsigned long pc;
260 int is_kernel;
261 int val;
262 int i;
263 unsigned int mmcr0;
264
265 pc = get_pc(regs);
266 is_kernel = get_kernel(pc);
267
268 /* set the PMM bit (see comment below) */
269 mtmsrd(mfmsr() | MSR_PMM);
270
271 for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
272 val = ctr_read(i);
273 if (val < 0) {
274 if (oprofile_running && ctr[i].enabled) {
275 oprofile_add_pc(pc, is_kernel, i);
276 ctr_write(i, reset_value[i]);
277 } else {
278 ctr_write(i, 0);
279 }
280 }
281 }
282
283 mmcr0 = mfspr(SPRN_MMCR0);
284
285 /* reset the perfmon trigger */
286 mmcr0 |= MMCR0_PMXE;
287
288 /*
289 * We must clear the PMAO bit on some (GQ) chips. Just do it
290 * all the time
291 */
292 mmcr0 &= ~MMCR0_PMAO;
293
294 /*
295 * now clear the freeze bit, counting will not start until we
296 * rfid from this exception, because only at that point will
297 * the PMM bit be cleared
298 */
299 mmcr0 &= ~MMCR0_FC;
300 mtspr(SPRN_MMCR0, mmcr0);
301}
302
303struct op_ppc64_model op_model_power4 = {
304 .reg_setup = power4_reg_setup,
305 .cpu_setup = power4_cpu_setup,
306 .start = power4_start,
307 .stop = power4_stop,
308 .handle_interrupt = power4_handle_interrupt,
309};
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/ppc64/oprofile/op_model_rs64.c
deleted file mode 100644
index 08c5b333f5c4..000000000000
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ /dev/null
@@ -1,218 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/oprofile.h>
11#include <linux/init.h>
12#include <linux/smp.h>
13#include <asm/ptrace.h>
14#include <asm/system.h>
15#include <asm/processor.h>
16#include <asm/cputable.h>
17#include <asm/oprofile_impl.h>
18
19#define dbg(args...)
20
21static void ctrl_write(unsigned int i, unsigned int val)
22{
23 unsigned int tmp = 0;
24 unsigned long shift = 0, mask = 0;
25
26 dbg("ctrl_write %d %x\n", i, val);
27
28 switch(i) {
29 case 0:
30 tmp = mfspr(SPRN_MMCR0);
31 shift = 6;
32 mask = 0x7F;
33 break;
34 case 1:
35 tmp = mfspr(SPRN_MMCR0);
36 shift = 0;
37 mask = 0x3F;
38 break;
39 case 2:
40 tmp = mfspr(SPRN_MMCR1);
41 shift = 31 - 4;
42 mask = 0x1F;
43 break;
44 case 3:
45 tmp = mfspr(SPRN_MMCR1);
46 shift = 31 - 9;
47 mask = 0x1F;
48 break;
49 case 4:
50 tmp = mfspr(SPRN_MMCR1);
51 shift = 31 - 14;
52 mask = 0x1F;
53 break;
54 case 5:
55 tmp = mfspr(SPRN_MMCR1);
56 shift = 31 - 19;
57 mask = 0x1F;
58 break;
59 case 6:
60 tmp = mfspr(SPRN_MMCR1);
61 shift = 31 - 24;
62 mask = 0x1F;
63 break;
64 case 7:
65 tmp = mfspr(SPRN_MMCR1);
66 shift = 31 - 28;
67 mask = 0xF;
68 break;
69 }
70
71 tmp = tmp & ~(mask << shift);
72 tmp |= val << shift;
73
74 switch(i) {
75 case 0:
76 case 1:
77 mtspr(SPRN_MMCR0, tmp);
78 break;
79 default:
80 mtspr(SPRN_MMCR1, tmp);
81 }
82
83 dbg("ctrl_write mmcr0 %lx mmcr1 %lx\n", mfspr(SPRN_MMCR0),
84 mfspr(SPRN_MMCR1));
85}
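
For example (event select invented), programming counter 0 lands in the PMC1
event field of MMCR0:

    /*
     * ctrl_write(0, 0x23) in effect does:
     *     tmp = mfspr(SPRN_MMCR0);
     *     tmp &= ~(0x7F << 6);    -- clear the PMC1 event field
     *     tmp |= 0x23 << 6;       -- install the new event select
     *     mtspr(SPRN_MMCR0, tmp);
     */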
86
87static unsigned long reset_value[OP_MAX_COUNTER];
88
89static int num_counters;
90
91static void rs64_reg_setup(struct op_counter_config *ctr,
92 struct op_system_config *sys,
93 int num_ctrs)
94{
95 int i;
96
97 num_counters = num_ctrs;
98
99 for (i = 0; i < num_counters; ++i)
100 reset_value[i] = 0x80000000UL - ctr[i].count;
101
102 /* XXX setup user and kernel profiling */
103}
104
105static void rs64_cpu_setup(void *unused)
106{
107 unsigned int mmcr0;
108
109 /* reset MMCR0 and set the freeze bit */
110 mmcr0 = MMCR0_FC;
111 mtspr(SPRN_MMCR0, mmcr0);
112
113 /* reset MMCR1, MMCRA */
114 mtspr(SPRN_MMCR1, 0);
115
116 if (cpu_has_feature(CPU_FTR_MMCRA))
117 mtspr(SPRN_MMCRA, 0);
118
119 mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
120 /* Only applies to POWER3, but should be safe on RS64 */
121 mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
122 mtspr(SPRN_MMCR0, mmcr0);
123
124 dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
125 mfspr(SPRN_MMCR0));
126 dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
127 mfspr(SPRN_MMCR1));
128}
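/*
 * Flag summary (editorial, per the PowerPC performance monitor
 * architecture): FCM1 freezes the counters while MSR[PMM] is set, so
 * events taken inside the PMI handler (which sets PMM, see rs64_start)
 * are not counted; PMXE enables the performance monitor exception; and
 * FCECE freezes all counters once an enabled condition or event occurs.
 */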
129
130static void rs64_start(struct op_counter_config *ctr)
131{
132 int i;
133 unsigned int mmcr0;
134
135 /* set the PMM bit (see comment below) */
136 mtmsrd(mfmsr() | MSR_PMM);
137
138 for (i = 0; i < num_counters; ++i) {
139 if (ctr[i].enabled) {
140 ctr_write(i, reset_value[i]);
141 ctrl_write(i, ctr[i].event);
142 } else {
143 ctr_write(i, 0);
144 }
145 }
146
147 mmcr0 = mfspr(SPRN_MMCR0);
148
149 /*
150 * now clear the freeze bit, counting will not start until we
151 * rfid from this exception, because only at that point will
152 * the PMM bit be cleared
153 */
154 mmcr0 &= ~MMCR0_FC;
155 mtspr(SPRN_MMCR0, mmcr0);
156
157 dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
158}
159
160static void rs64_stop(void)
161{
162 unsigned int mmcr0;
163
164 /* freeze counters */
165 mmcr0 = mfspr(SPRN_MMCR0);
166 mmcr0 |= MMCR0_FC;
167 mtspr(SPRN_MMCR0, mmcr0);
168
169 dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
170
171 mb();
172}
173
174static void rs64_handle_interrupt(struct pt_regs *regs,
175 struct op_counter_config *ctr)
176{
177 unsigned int mmcr0;
178 int val;
179 int i;
180 unsigned long pc = mfspr(SPRN_SIAR);
181 int is_kernel = (pc >= KERNELBASE);
182
183 /* set the PMM bit (see comment below) */
184 mtmsrd(mfmsr() | MSR_PMM);
185
186 for (i = 0; i < num_counters; ++i) {
187 val = ctr_read(i);
188 if (val < 0) {
189 if (ctr[i].enabled) {
190 oprofile_add_pc(pc, is_kernel, i);
191 ctr_write(i, reset_value[i]);
192 } else {
193 ctr_write(i, 0);
194 }
195 }
196 }
197
198 mmcr0 = mfspr(SPRN_MMCR0);
199
200 /* reset the perfmon trigger */
201 mmcr0 |= MMCR0_PMXE;
202
203 /*
204 * now clear the freeze bit, counting will not start until we
205 * rfid from this exception, because only at that point will
206 * the PMM bit be cleared
207 */
208 mmcr0 &= ~MMCR0_FC;
209 mtspr(SPRN_MMCR0, mmcr0);
210}
211
212struct op_ppc64_model op_model_rs64 = {
213 .reg_setup = rs64_reg_setup,
214 .cpu_setup = rs64_cpu_setup,
215 .start = rs64_start,
216 .stop = rs64_stop,
217 .handle_interrupt = rs64_handle_interrupt,
218};
diff --git a/arch/ppc64/xmon/Makefile b/arch/ppc64/xmon/Makefile
deleted file mode 100644
index fb21a7088d3e..000000000000
--- a/arch/ppc64/xmon/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1# Makefile for xmon
2
3EXTRA_CFLAGS += -mno-minimal-toc
4
5obj-y := start.o xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
diff --git a/arch/ppc64/xmon/ansidecl.h b/arch/ppc64/xmon/ansidecl.h
deleted file mode 100644
index c9b9f0929e9e..000000000000
--- a/arch/ppc64/xmon/ansidecl.h
+++ /dev/null
@@ -1,141 +0,0 @@
1/* ANSI and traditional C compatibility macros
2 Copyright 1991, 1992 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5This program is free software; you can redistribute it and/or modify
6it under the terms of the GNU General Public License as published by
7the Free Software Foundation; either version 2 of the License, or
8(at your option) any later version.
9
10This program is distributed in the hope that it will be useful,
11but WITHOUT ANY WARRANTY; without even the implied warranty of
12MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13GNU General Public License for more details.
14
15You should have received a copy of the GNU General Public License
16along with this program; if not, write to the Free Software
17Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
18
19/* ANSI and traditional C compatibility macros
20
21 ANSI C is assumed if __STDC__ is #defined.
22
23 Macro ANSI C definition Traditional C definition
24 ----- ---- - ---------- ----------- - ----------
25 PTR `void *' `char *'
26 LONG_DOUBLE `long double' `double'
27 VOLATILE `volatile' `'
28 SIGNED `signed' `'
29 PTRCONST `void *const' `char *'
30 ANSI_PROTOTYPES 1 not defined
31
32 CONST is also defined, but is obsolete. Just use const.
33
34 DEFUN (name, arglist, args)
35
36 Defines function NAME.
37
38 ARGLIST lists the arguments, separated by commas and enclosed in
39 parentheses. ARGLIST becomes the argument list in traditional C.
40
41 ARGS list the arguments with their types. It becomes a prototype in
42 ANSI C, and the type declarations in traditional C. Arguments should
43 be separated with `AND'. For functions with a variable number of
44 arguments, the last thing listed should be `DOTS'.
45
46 DEFUN_VOID (name)
47
48 Defines a function NAME, which takes no arguments.
49
50 obsolete -- EXFUN (name, (prototype)) -- obsolete.
51
52 Replaced by PARAMS. Do not use; will disappear someday soon.
53 Was used in external function declarations.
54 In ANSI C it is `NAME PROTOTYPE' (so PROTOTYPE should be enclosed in
55 parentheses). In traditional C it is `NAME()'.
56 For a function that takes no arguments, PROTOTYPE should be `(void)'.
57
58 PARAMS ((args))
59
60 We could use the EXFUN macro to handle prototype declarations, but
61 the name is misleading and the result is ugly. So we just define a
62 simple macro to handle the parameter lists, as in:
63
64 static int foo PARAMS ((int, char));
65
66 This produces: `static int foo();' or `static int foo (int, char);'
67
68 EXFUN would have done it like this:
69
70 static int EXFUN (foo, (int, char));
71
72 but the function is not external...and it's hard to visually parse
73 the function name out of the mess. EXFUN should be considered
74 obsolete; new code should be written to use PARAMS.
75
76 For example:
77 extern int printf PARAMS ((CONST char *format DOTS));
78 int DEFUN(fprintf, (stream, format),
79 FILE *stream AND CONST char *format DOTS) { ... }
80 void DEFUN_VOID(abort) { ... }
81*/
82
83#ifndef _ANSIDECL_H
84
85#define _ANSIDECL_H 1
86
87
88/* Every source file includes this file,
89 so they will all get the switch for lint. */
90/* LINTLIBRARY */
91
92
93#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(WIN32)
94/* All known AIX compilers implement these things (but don't always
95 define __STDC__). The RISC/OS MIPS compiler defines these things
96 in SVR4 mode, but does not define __STDC__. */
97
98#define PTR void *
99#define PTRCONST void *CONST
100#define LONG_DOUBLE long double
101
102#define AND ,
103#define NOARGS void
104#define CONST const
105#define VOLATILE volatile
106#define SIGNED signed
107#define DOTS , ...
108
109#define EXFUN(name, proto) name proto
110#define DEFUN(name, arglist, args) name(args)
111#define DEFUN_VOID(name) name(void)
112
113#define PROTO(type, name, arglist) type name arglist
114#define PARAMS(paramlist) paramlist
115#define ANSI_PROTOTYPES 1
116
117#else /* Not ANSI C. */
118
119#define PTR char *
120#define PTRCONST PTR
121#define LONG_DOUBLE double
122
123#define AND ;
124#define NOARGS
125#define CONST
126#ifndef const /* some systems define it in header files for non-ansi mode */
127#define const
128#endif
129#define VOLATILE
130#define SIGNED
131#define DOTS
132
133#define EXFUN(name, proto) name()
134#define DEFUN(name, arglist, args) name arglist args;
135#define DEFUN_VOID(name) name()
136#define PROTO(type, name, arglist) type name ()
137#define PARAMS(paramlist) ()
138
139#endif /* ANSI C. */
140
141#endif /* ansidecl.h */
diff --git a/arch/ppc64/xmon/nonstdio.h b/arch/ppc64/xmon/nonstdio.h
deleted file mode 100644
index 84211a21c6f4..000000000000
--- a/arch/ppc64/xmon/nonstdio.h
+++ /dev/null
@@ -1,22 +0,0 @@
1typedef int FILE;
2extern FILE *xmon_stdin, *xmon_stdout;
3#define EOF (-1)
4#define stdin xmon_stdin
5#define stdout xmon_stdout
6#define printf xmon_printf
7#define fprintf xmon_fprintf
8#define fputs xmon_fputs
9#define fgets xmon_fgets
10#define putchar xmon_putchar
11#define getchar xmon_getchar
12#define putc xmon_putc
13#define getc xmon_getc
14#define fopen(n, m) NULL
15#define fflush(f) do {} while (0)
16#define fclose(f) do {} while (0)
17extern char *fgets(char *, int, void *);
18extern void xmon_printf(const char *, ...);
19extern void xmon_fprintf(void *, const char *, ...);
20extern void xmon_sprintf(char *, const char *, ...);
21
22#define perror(s) printf("%s: no files!\n", (s))
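/*
 * Effect sketch (editorial): with these defines in force, borrowed
 * binutils code such as
 *
 *	fprintf(stdout, "branch to %lx\n", addr);
 *
 * preprocesses into
 *
 *	xmon_fprintf(xmon_stdout, "branch to %lx\n", addr);
 *
 * so the disassembler sources compile in-kernel without real stdio.
 */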
diff --git a/arch/ppc64/xmon/ppc-dis.c b/arch/ppc64/xmon/ppc-dis.c
deleted file mode 100644
index ac0a9d2427e0..000000000000
--- a/arch/ppc64/xmon/ppc-dis.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/* ppc-dis.c -- Disassemble PowerPC instructions
2 Copyright 1994 Free Software Foundation, Inc.
3 Written by Ian Lance Taylor, Cygnus Support
4
5This file is part of GDB, GAS, and the GNU binutils.
6
7GDB, GAS, and the GNU binutils are free software; you can redistribute
8them and/or modify them under the terms of the GNU General Public
9License as published by the Free Software Foundation; either version
102, or (at your option) any later version.
11
12GDB, GAS, and the GNU binutils are distributed in the hope that they
13will be useful, but WITHOUT ANY WARRANTY; without even the implied
14warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15the GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with this file; see the file COPYING. If not, write to the Free
19Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
20
21#include "nonstdio.h"
22#include "ansidecl.h"
23#include "ppc.h"
24
25extern void print_address (unsigned long memaddr);
26
27/* Print a PowerPC or POWER instruction. */
28
29int
30print_insn_powerpc (unsigned long insn, unsigned long memaddr, int dialect)
31{
32 const struct powerpc_opcode *opcode;
33 const struct powerpc_opcode *opcode_end;
34 unsigned long op;
35
36 if (dialect == 0)
37 dialect = PPC_OPCODE_PPC | PPC_OPCODE_CLASSIC | PPC_OPCODE_COMMON
38 | PPC_OPCODE_64 | PPC_OPCODE_POWER4 | PPC_OPCODE_ALTIVEC;
39
40 /* Get the major opcode of the instruction. */
41 op = PPC_OP (insn);
42
43 /* Find the first match in the opcode table. We could speed this up
44 a bit by doing a binary search on the major opcode. */
45 opcode_end = powerpc_opcodes + powerpc_num_opcodes;
46 again:
47 for (opcode = powerpc_opcodes; opcode < opcode_end; opcode++)
48 {
49 unsigned long table_op;
50 const unsigned char *opindex;
51 const struct powerpc_operand *operand;
52 int invalid;
53 int need_comma;
54 int need_paren;
55
56 table_op = PPC_OP (opcode->opcode);
57 if (op < table_op)
58 break;
59 if (op > table_op)
60 continue;
61
62 if ((insn & opcode->mask) != opcode->opcode
63 || (opcode->flags & dialect) == 0)
64 continue;
65
66 /* Make two passes over the operands. First see if any of them
67 have extraction functions, and, if they do, make sure the
68 instruction is valid. */
69 invalid = 0;
70 for (opindex = opcode->operands; *opindex != 0; opindex++)
71 {
72 operand = powerpc_operands + *opindex;
73 if (operand->extract)
74 (*operand->extract) (insn, dialect, &invalid);
75 }
76 if (invalid)
77 continue;
78
79 /* The instruction is valid. */
80 printf("%s", opcode->name);
81 if (opcode->operands[0] != 0)
82 printf("\t");
83
84 /* Now extract and print the operands. */
85 need_comma = 0;
86 need_paren = 0;
87 for (opindex = opcode->operands; *opindex != 0; opindex++)
88 {
89 long value;
90
91 operand = powerpc_operands + *opindex;
92
93 /* Operands that are marked FAKE are simply ignored. We
94 already made sure that the extract function considered
95 the instruction to be valid. */
96 if ((operand->flags & PPC_OPERAND_FAKE) != 0)
97 continue;
98
99 /* Extract the value from the instruction. */
100 if (operand->extract)
101 value = (*operand->extract) (insn, dialect, &invalid);
102 else
103 {
104 value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
105 if ((operand->flags & PPC_OPERAND_SIGNED) != 0
106 && (value & (1 << (operand->bits - 1))) != 0)
107 value -= 1 << operand->bits;
108 }
109
110 /* If the operand is optional, and the value is zero, don't
111 print anything. */
112 if ((operand->flags & PPC_OPERAND_OPTIONAL) != 0
113 && (operand->flags & PPC_OPERAND_NEXT) == 0
114 && value == 0)
115 continue;
116
117 if (need_comma)
118 {
119 printf(",");
120 need_comma = 0;
121 }
122
123 /* Print the operand as directed by the flags. */
124 if ((operand->flags & PPC_OPERAND_GPR) != 0)
125 printf("r%ld", value);
126 else if ((operand->flags & PPC_OPERAND_FPR) != 0)
127 printf("f%ld", value);
128 else if ((operand->flags & PPC_OPERAND_VR) != 0)
129 printf("v%ld", value);
130 else if ((operand->flags & PPC_OPERAND_RELATIVE) != 0)
131 print_address (memaddr + value);
132 else if ((operand->flags & PPC_OPERAND_ABSOLUTE) != 0)
133 print_address (value & 0xffffffff);
134 else if ((operand->flags & PPC_OPERAND_CR) == 0
135 || (dialect & PPC_OPCODE_PPC) == 0)
136 printf("%ld", value);
137 else
138 {
139 if (operand->bits == 3)
140		      printf("cr%ld", value);
141 else
142 {
143 static const char *cbnames[4] = { "lt", "gt", "eq", "so" };
144 int cr;
145 int cc;
146
147 cr = value >> 2;
148 if (cr != 0)
149 printf("4*cr%d+", cr);
150 cc = value & 3;
151 printf("%s", cbnames[cc]);
152 }
153 }
154
155 if (need_paren)
156 {
157 printf(")");
158 need_paren = 0;
159 }
160
161 if ((operand->flags & PPC_OPERAND_PARENS) == 0)
162 need_comma = 1;
163 else
164 {
165 printf("(");
166 need_paren = 1;
167 }
168 }
169
170 /* We have found and printed an instruction; return. */
171 return 4;
172 }
173
174 if ((dialect & PPC_OPCODE_ANY) != 0)
175 {
176 dialect = ~PPC_OPCODE_ANY;
177 goto again;
178 }
179
180 /* We could not find a match. */
181 printf(".long 0x%lx", insn);
182
183 return 4;
184}
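/*
 * Usage sketch (editorial; the call below is hypothetical):
 * disassembling the classic "mflr r0" encoding with the default
 * dialect,
 *
 *	print_insn_powerpc(0x7c0802a6, memaddr, 0);
 *
 * matches the extended-mnemonic entry first (assuming the usual
 * binutils table order, where specific forms precede the generic
 * mfspr), prints "mflr	r0", and returns 4, the instruction size.
 */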
diff --git a/arch/ppc64/xmon/ppc-opc.c b/arch/ppc64/xmon/ppc-opc.c
deleted file mode 100644
index 5ee8fc32f824..000000000000
--- a/arch/ppc64/xmon/ppc-opc.c
+++ /dev/null
@@ -1,4621 +0,0 @@
1/* ppc-opc.c -- PowerPC opcode list
2 Copyright 1994, 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003
3 Free Software Foundation, Inc.
4 Written by Ian Lance Taylor, Cygnus Support
5
6 This file is part of GDB, GAS, and the GNU binutils.
7
8 GDB, GAS, and the GNU binutils are free software; you can redistribute
9 them and/or modify them under the terms of the GNU General Public
10 License as published by the Free Software Foundation; either version
11 2, or (at your option) any later version.
12
13 GDB, GAS, and the GNU binutils are distributed in the hope that they
14 will be useful, but WITHOUT ANY WARRANTY; without even the implied
15 warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 the GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this file; see the file COPYING. If not, write to the Free
20 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
21 02111-1307, USA. */
22
23#include <linux/stddef.h>
24#include "nonstdio.h"
25#include "ppc.h"
26
27#define ATTRIBUTE_UNUSED
28#define _(x) x
29
30/* This file holds the PowerPC opcode table. The opcode table
31 includes almost all of the extended instruction mnemonics. This
32 permits the disassembler to use them, and simplifies the assembler
33 logic, at the cost of increasing the table size. The table is
34 strictly constant data, so the compiler should be able to put it in
35 the .text section.
36
37 This file also holds the operand table. All knowledge about
38 inserting operands into instructions and vice-versa is kept in this
39 file. */
40
41/* Local insertion and extraction functions. */
42
43static unsigned long insert_bat (unsigned long, long, int, const char **);
44static long extract_bat (unsigned long, int, int *);
45static unsigned long insert_bba (unsigned long, long, int, const char **);
46static long extract_bba (unsigned long, int, int *);
47static unsigned long insert_bd (unsigned long, long, int, const char **);
48static long extract_bd (unsigned long, int, int *);
49static unsigned long insert_bdm (unsigned long, long, int, const char **);
50static long extract_bdm (unsigned long, int, int *);
51static unsigned long insert_bdp (unsigned long, long, int, const char **);
52static long extract_bdp (unsigned long, int, int *);
53static unsigned long insert_bo (unsigned long, long, int, const char **);
54static long extract_bo (unsigned long, int, int *);
55static unsigned long insert_boe (unsigned long, long, int, const char **);
56static long extract_boe (unsigned long, int, int *);
57static unsigned long insert_dq (unsigned long, long, int, const char **);
58static long extract_dq (unsigned long, int, int *);
59static unsigned long insert_ds (unsigned long, long, int, const char **);
60static long extract_ds (unsigned long, int, int *);
61static unsigned long insert_de (unsigned long, long, int, const char **);
62static long extract_de (unsigned long, int, int *);
63static unsigned long insert_des (unsigned long, long, int, const char **);
64static long extract_des (unsigned long, int, int *);
65static unsigned long insert_fxm (unsigned long, long, int, const char **);
66static long extract_fxm (unsigned long, int, int *);
67static unsigned long insert_li (unsigned long, long, int, const char **);
68static long extract_li (unsigned long, int, int *);
69static unsigned long insert_mbe (unsigned long, long, int, const char **);
70static long extract_mbe (unsigned long, int, int *);
71static unsigned long insert_mb6 (unsigned long, long, int, const char **);
72static long extract_mb6 (unsigned long, int, int *);
73static unsigned long insert_nb (unsigned long, long, int, const char **);
74static long extract_nb (unsigned long, int, int *);
75static unsigned long insert_nsi (unsigned long, long, int, const char **);
76static long extract_nsi (unsigned long, int, int *);
77static unsigned long insert_ral (unsigned long, long, int, const char **);
78static unsigned long insert_ram (unsigned long, long, int, const char **);
79static unsigned long insert_raq (unsigned long, long, int, const char **);
80static unsigned long insert_ras (unsigned long, long, int, const char **);
81static unsigned long insert_rbs (unsigned long, long, int, const char **);
82static long extract_rbs (unsigned long, int, int *);
83static unsigned long insert_rsq (unsigned long, long, int, const char **);
84static unsigned long insert_rtq (unsigned long, long, int, const char **);
85static unsigned long insert_sh6 (unsigned long, long, int, const char **);
86static long extract_sh6 (unsigned long, int, int *);
87static unsigned long insert_spr (unsigned long, long, int, const char **);
88static long extract_spr (unsigned long, int, int *);
89static unsigned long insert_tbr (unsigned long, long, int, const char **);
90static long extract_tbr (unsigned long, int, int *);
91static unsigned long insert_ev2 (unsigned long, long, int, const char **);
92static long extract_ev2 (unsigned long, int, int *);
93static unsigned long insert_ev4 (unsigned long, long, int, const char **);
94static long extract_ev4 (unsigned long, int, int *);
95static unsigned long insert_ev8 (unsigned long, long, int, const char **);
96static long extract_ev8 (unsigned long, int, int *);
97
98/* The operands table.
99
100 The fields are bits, shift, insert, extract, flags.
101
102 We used to put parens around the various additions, like the one
103 for BA just below. However, that caused trouble with feeble
104 compilers with a limit on depth of a parenthesized expression, like
105 (reportedly) the compiler in Microsoft Developer Studio 5. So we
106 omit the parens, since the macros are never used in a context where
107 the addition will be ambiguous. */
108
109const struct powerpc_operand powerpc_operands[] =
110{
111 /* The zero index is used to indicate the end of the list of
112 operands. */
113#define UNUSED 0
114 { 0, 0, NULL, NULL, 0 },
115
116 /* The BA field in an XL form instruction. */
117#define BA UNUSED + 1
118#define BA_MASK (0x1f << 16)
119 { 5, 16, NULL, NULL, PPC_OPERAND_CR },
120
121 /* The BA field in an XL form instruction when it must be the same
122 as the BT field in the same instruction. */
123#define BAT BA + 1
124 { 5, 16, insert_bat, extract_bat, PPC_OPERAND_FAKE },
125
126 /* The BB field in an XL form instruction. */
127#define BB BAT + 1
128#define BB_MASK (0x1f << 11)
129 { 5, 11, NULL, NULL, PPC_OPERAND_CR },
130
131 /* The BB field in an XL form instruction when it must be the same
132 as the BA field in the same instruction. */
133#define BBA BB + 1
134 { 5, 11, insert_bba, extract_bba, PPC_OPERAND_FAKE },
135
136 /* The BD field in a B form instruction. The lower two bits are
137 forced to zero. */
138#define BD BBA + 1
139 { 16, 0, insert_bd, extract_bd, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
140
141 /* The BD field in a B form instruction when absolute addressing is
142 used. */
143#define BDA BD + 1
144 { 16, 0, insert_bd, extract_bd, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
145
146 /* The BD field in a B form instruction when the - modifier is used.
147 This sets the y bit of the BO field appropriately. */
148#define BDM BDA + 1
149 { 16, 0, insert_bdm, extract_bdm,
150 PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
151
152 /* The BD field in a B form instruction when the - modifier is used
153 and absolute address is used. */
154#define BDMA BDM + 1
155 { 16, 0, insert_bdm, extract_bdm,
156 PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
157
158 /* The BD field in a B form instruction when the + modifier is used.
159 This sets the y bit of the BO field appropriately. */
160#define BDP BDMA + 1
161 { 16, 0, insert_bdp, extract_bdp,
162 PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
163
164 /* The BD field in a B form instruction when the + modifier is used
165 and absolute addressing is used. */
166#define BDPA BDP + 1
167 { 16, 0, insert_bdp, extract_bdp,
168 PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
169
170 /* The BF field in an X or XL form instruction. */
171#define BF BDPA + 1
172 { 3, 23, NULL, NULL, PPC_OPERAND_CR },
173
174 /* An optional BF field. This is used for comparison instructions,
175 in which an omitted BF field is taken as zero. */
176#define OBF BF + 1
177 { 3, 23, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
178
179 /* The BFA field in an X or XL form instruction. */
180#define BFA OBF + 1
181 { 3, 18, NULL, NULL, PPC_OPERAND_CR },
182
183 /* The BI field in a B form or XL form instruction. */
184#define BI BFA + 1
185#define BI_MASK (0x1f << 16)
186 { 5, 16, NULL, NULL, PPC_OPERAND_CR },
187
188 /* The BO field in a B form instruction. Certain values are
189 illegal. */
190#define BO BI + 1
191#define BO_MASK (0x1f << 21)
192 { 5, 21, insert_bo, extract_bo, 0 },
193
194 /* The BO field in a B form instruction when the + or - modifier is
195 used. This is like the BO field, but it must be even. */
196#define BOE BO + 1
197 { 5, 21, insert_boe, extract_boe, 0 },
198
199 /* The BT field in an X or XL form instruction. */
200#define BT BOE + 1
201 { 5, 21, NULL, NULL, PPC_OPERAND_CR },
202
203 /* The condition register number portion of the BI field in a B form
204 or XL form instruction. This is used for the extended
205 conditional branch mnemonics, which set the lower two bits of the
206 BI field. This field is optional. */
207#define CR BT + 1
208 { 3, 18, NULL, NULL, PPC_OPERAND_CR | PPC_OPERAND_OPTIONAL },
209
210 /* The CRB field in an X form instruction. */
211#define CRB CR + 1
212 { 5, 6, NULL, NULL, 0 },
213
214 /* The CRFD field in an X form instruction. */
215#define CRFD CRB + 1
216 { 3, 23, NULL, NULL, PPC_OPERAND_CR },
217
218 /* The CRFS field in an X form instruction. */
219#define CRFS CRFD + 1
220 { 3, 0, NULL, NULL, PPC_OPERAND_CR },
221
222 /* The CT field in an X form instruction. */
223#define CT CRFS + 1
224 { 5, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
225
226 /* The D field in a D form instruction. This is a displacement off
227 a register, and implies that the next operand is a register in
228 parentheses. */
229#define D CT + 1
230 { 16, 0, NULL, NULL, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
231
232 /* The DE field in a DE form instruction. This is like D, but is 12
233 bits only. */
234#define DE D + 1
235 { 14, 0, insert_de, extract_de, PPC_OPERAND_PARENS },
236
237 /* The DES field in a DES form instruction. This is like DS, but is 14
238 bits only (12 stored). */
239#define DES DE + 1
240 { 14, 0, insert_des, extract_des, PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED },
241
242 /* The DQ field in a DQ form instruction. This is like D, but the
243 lower four bits are forced to zero. */
244#define DQ DES + 1
245 { 16, 0, insert_dq, extract_dq,
246 PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DQ },
247
248 /* The DS field in a DS form instruction. This is like D, but the
249 lower two bits are forced to zero. */
250#define DS DQ + 1
251 { 16, 0, insert_ds, extract_ds,
252 PPC_OPERAND_PARENS | PPC_OPERAND_SIGNED | PPC_OPERAND_DS },
253
254 /* The E field in a wrteei instruction. */
255#define E DS + 1
256 { 1, 15, NULL, NULL, 0 },
257
258 /* The FL1 field in a POWER SC form instruction. */
259#define FL1 E + 1
260 { 4, 12, NULL, NULL, 0 },
261
262 /* The FL2 field in a POWER SC form instruction. */
263#define FL2 FL1 + 1
264 { 3, 2, NULL, NULL, 0 },
265
266 /* The FLM field in an XFL form instruction. */
267#define FLM FL2 + 1
268 { 8, 17, NULL, NULL, 0 },
269
270 /* The FRA field in an X or A form instruction. */
271#define FRA FLM + 1
272#define FRA_MASK (0x1f << 16)
273 { 5, 16, NULL, NULL, PPC_OPERAND_FPR },
274
275 /* The FRB field in an X or A form instruction. */
276#define FRB FRA + 1
277#define FRB_MASK (0x1f << 11)
278 { 5, 11, NULL, NULL, PPC_OPERAND_FPR },
279
280 /* The FRC field in an A form instruction. */
281#define FRC FRB + 1
282#define FRC_MASK (0x1f << 6)
283 { 5, 6, NULL, NULL, PPC_OPERAND_FPR },
284
285 /* The FRS field in an X form instruction or the FRT field in a D, X
286 or A form instruction. */
287#define FRS FRC + 1
288#define FRT FRS
289 { 5, 21, NULL, NULL, PPC_OPERAND_FPR },
290
291 /* The FXM field in an XFX instruction. */
292#define FXM FRS + 1
293#define FXM_MASK (0xff << 12)
294 { 8, 12, insert_fxm, extract_fxm, 0 },
295
296 /* Power4 version for mfcr. */
297#define FXM4 FXM + 1
298 { 8, 12, insert_fxm, extract_fxm, PPC_OPERAND_OPTIONAL },
299
300 /* The L field in a D or X form instruction. */
301#define L FXM4 + 1
302 { 1, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
303
304 /* The LEV field in a POWER SC form instruction. */
305#define LEV L + 1
306 { 7, 5, NULL, NULL, 0 },
307
308 /* The LI field in an I form instruction. The lower two bits are
309 forced to zero. */
310#define LI LEV + 1
311 { 26, 0, insert_li, extract_li, PPC_OPERAND_RELATIVE | PPC_OPERAND_SIGNED },
312
313 /* The LI field in an I form instruction when used as an absolute
314 address. */
315#define LIA LI + 1
316 { 26, 0, insert_li, extract_li, PPC_OPERAND_ABSOLUTE | PPC_OPERAND_SIGNED },
317
318 /* The LS field in an X (sync) form instruction. */
319#define LS LIA + 1
320 { 2, 21, NULL, NULL, PPC_OPERAND_OPTIONAL },
321
322 /* The MB field in an M form instruction. */
323#define MB LS + 1
324#define MB_MASK (0x1f << 6)
325 { 5, 6, NULL, NULL, 0 },
326
327 /* The ME field in an M form instruction. */
328#define ME MB + 1
329#define ME_MASK (0x1f << 1)
330 { 5, 1, NULL, NULL, 0 },
331
332 /* The MB and ME fields in an M form instruction expressed as a single
333 operand which is a bitmask indicating which bits to select. This
334 is a two operand form using PPC_OPERAND_NEXT. See the
335 description in opcode/ppc.h for what this means. */
336#define MBE ME + 1
337 { 5, 6, NULL, NULL, PPC_OPERAND_OPTIONAL | PPC_OPERAND_NEXT },
338 { 32, 0, insert_mbe, extract_mbe, 0 },
339
340 /* The MB or ME field in an MD or MDS form instruction. The high
341 bit is wrapped to the low end. */
342#define MB6 MBE + 2
343#define ME6 MB6
344#define MB6_MASK (0x3f << 5)
345 { 6, 5, insert_mb6, extract_mb6, 0 },
346
347 /* The MO field in an mbar instruction. */
348#define MO MB6 + 1
349 { 5, 21, NULL, NULL, 0 },
350
351 /* The NB field in an X form instruction. The value 32 is stored as
352 0. */
353#define NB MO + 1
354 { 6, 11, insert_nb, extract_nb, 0 },
355
356 /* The NSI field in a D form instruction. This is the same as the
357 SI field, only negated. */
358#define NSI NB + 1
359 { 16, 0, insert_nsi, extract_nsi,
360 PPC_OPERAND_NEGATIVE | PPC_OPERAND_SIGNED },
361
362 /* The RA field in an D, DS, DQ, X, XO, M, or MDS form instruction. */
363#define RA NSI + 1
364#define RA_MASK (0x1f << 16)
365 { 5, 16, NULL, NULL, PPC_OPERAND_GPR },
366
367 /* The RA field in the DQ form lq instruction, which has special
368 value restrictions. */
369#define RAQ RA + 1
370 { 5, 16, insert_raq, NULL, PPC_OPERAND_GPR },
371
372 /* The RA field in a D or X form instruction which is an updating
373 load, which means that the RA field may not be zero and may not
374 equal the RT field. */
375#define RAL RAQ + 1
376 { 5, 16, insert_ral, NULL, PPC_OPERAND_GPR },
377
378 /* The RA field in an lmw instruction, which has special value
379 restrictions. */
380#define RAM RAL + 1
381 { 5, 16, insert_ram, NULL, PPC_OPERAND_GPR },
382
383 /* The RA field in a D or X form instruction which is an updating
384 store or an updating floating point load, which means that the RA
385 field may not be zero. */
386#define RAS RAM + 1
387 { 5, 16, insert_ras, NULL, PPC_OPERAND_GPR },
388
389 /* The RB field in an X, XO, M, or MDS form instruction. */
390#define RB RAS + 1
391#define RB_MASK (0x1f << 11)
392 { 5, 11, NULL, NULL, PPC_OPERAND_GPR },
393
394 /* The RB field in an X form instruction when it must be the same as
395 the RS field in the instruction. This is used for extended
396 mnemonics like mr. */
397#define RBS RB + 1
398 { 5, 1, insert_rbs, extract_rbs, PPC_OPERAND_FAKE },
399
400 /* The RS field in a D, DS, X, XFX, XS, M, MD or MDS form
401 instruction or the RT field in a D, DS, X, XFX or XO form
402 instruction. */
403#define RS RBS + 1
404#define RT RS
405#define RT_MASK (0x1f << 21)
406 { 5, 21, NULL, NULL, PPC_OPERAND_GPR },
407
408 /* The RS field of the DS form stq instruction, which has special
409 value restrictions. */
410#define RSQ RS + 1
411 { 5, 21, insert_rsq, NULL, PPC_OPERAND_GPR },
412
413 /* The RT field of the DQ form lq instruction, which has special
414 value restrictions. */
415#define RTQ RSQ + 1
416 { 5, 21, insert_rtq, NULL, PPC_OPERAND_GPR },
417
418 /* The SH field in an X or M form instruction. */
419#define SH RTQ + 1
420#define SH_MASK (0x1f << 11)
421 { 5, 11, NULL, NULL, 0 },
422
423 /* The SH field in an MD form instruction. This is split. */
424#define SH6 SH + 1
425#define SH6_MASK ((0x1f << 11) | (1 << 1))
426 { 6, 1, insert_sh6, extract_sh6, 0 },
427
428 /* The SI field in a D form instruction. */
429#define SI SH6 + 1
430 { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED },
431
432 /* The SI field in a D form instruction when we accept a wide range
433 of positive values. */
434#define SISIGNOPT SI + 1
435 { 16, 0, NULL, NULL, PPC_OPERAND_SIGNED | PPC_OPERAND_SIGNOPT },
436
437 /* The SPR field in an XFX form instruction. This is flipped--the
438 lower 5 bits are stored in the upper 5 and vice versa. */
439#define SPR SISIGNOPT + 1
440#define PMR SPR
441#define SPR_MASK (0x3ff << 11)
442 { 10, 11, insert_spr, extract_spr, 0 },
443
444 /* The BAT index number in an XFX form m[ft]ibat[lu] instruction. */
445#define SPRBAT SPR + 1
446#define SPRBAT_MASK (0x3 << 17)
447 { 2, 17, NULL, NULL, 0 },
448
449 /* The SPRG register number in an XFX form m[ft]sprg instruction. */
450#define SPRG SPRBAT + 1
451#define SPRG_MASK (0x3 << 16)
452 { 2, 16, NULL, NULL, 0 },
453
454 /* The SR field in an X form instruction. */
455#define SR SPRG + 1
456 { 4, 16, NULL, NULL, 0 },
457
458 /* The STRM field in an X AltiVec form instruction. */
459#define STRM SR + 1
460#define STRM_MASK (0x3 << 21)
461 { 2, 21, NULL, NULL, 0 },
462
463 /* The SV field in a POWER SC form instruction. */
464#define SV STRM + 1
465 { 14, 2, NULL, NULL, 0 },
466
467 /* The TBR field in an XFX form instruction. This is like the SPR
468 field, but it is optional. */
469#define TBR SV + 1
470 { 10, 11, insert_tbr, extract_tbr, PPC_OPERAND_OPTIONAL },
471
472 /* The TO field in a D or X form instruction. */
473#define TO TBR + 1
474#define TO_MASK (0x1f << 21)
475 { 5, 21, NULL, NULL, 0 },
476
477 /* The U field in an X form instruction. */
478#define U TO + 1
479 { 4, 12, NULL, NULL, 0 },
480
481 /* The UI field in a D form instruction. */
482#define UI U + 1
483 { 16, 0, NULL, NULL, 0 },
484
485 /* The VA field in a VA, VX or VXR form instruction. */
486#define VA UI + 1
487#define VA_MASK (0x1f << 16)
488 { 5, 16, NULL, NULL, PPC_OPERAND_VR },
489
490 /* The VB field in a VA, VX or VXR form instruction. */
491#define VB VA + 1
492#define VB_MASK (0x1f << 11)
493 { 5, 11, NULL, NULL, PPC_OPERAND_VR },
494
495 /* The VC field in a VA form instruction. */
496#define VC VB + 1
497#define VC_MASK (0x1f << 6)
498 { 5, 6, NULL, NULL, PPC_OPERAND_VR },
499
500 /* The VD or VS field in a VA, VX, VXR or X form instruction. */
501#define VD VC + 1
502#define VS VD
503#define VD_MASK (0x1f << 21)
504 { 5, 21, NULL, NULL, PPC_OPERAND_VR },
505
506 /* The SIMM field in a VX form instruction. */
507#define SIMM VD + 1
508 { 5, 16, NULL, NULL, PPC_OPERAND_SIGNED },
509
510 /* The UIMM field in a VX form instruction. */
511#define UIMM SIMM + 1
512 { 5, 16, NULL, NULL, 0 },
513
514 /* The SHB field in a VA form instruction. */
515#define SHB UIMM + 1
516 { 4, 6, NULL, NULL, 0 },
517
518 /* The other UIMM field in a EVX form instruction. */
519#define EVUIMM SHB + 1
520 { 5, 11, NULL, NULL, 0 },
521
522 /* The other UIMM field in a half word EVX form instruction. */
523#define EVUIMM_2 EVUIMM + 1
524 { 32, 11, insert_ev2, extract_ev2, PPC_OPERAND_PARENS },
525
526 /* The other UIMM field in a word EVX form instruction. */
527#define EVUIMM_4 EVUIMM_2 + 1
528 { 32, 11, insert_ev4, extract_ev4, PPC_OPERAND_PARENS },
529
530 /* The other UIMM field in a double EVX form instruction. */
531#define EVUIMM_8 EVUIMM_4 + 1
532 { 32, 11, insert_ev8, extract_ev8, PPC_OPERAND_PARENS },
533
534 /* The WS field. */
535#define WS EVUIMM_8 + 1
536#define WS_MASK (0x7 << 11)
537 { 3, 11, NULL, NULL, 0 },
538
539 /* The L field in an mtmsrd instruction. */
540#define MTMSRD_L WS + 1
541 { 1, 16, NULL, NULL, PPC_OPERAND_OPTIONAL },
542
543};
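/*
 * Shape of an opcode entry (editorial sketch; the opcode table itself
 * comes later in this file): each powerpc_opcode lists operand indices
 * into powerpc_operands[], e.g. an "add" entry carries { RT, RA, RB },
 * and the disassembler walks that list, pulling each field out with
 *
 *	value = (insn >> operand->shift) & ((1 << operand->bits) - 1);
 *
 * unless the operand supplies its own extract function.
 */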
544
545/* The functions used to insert and extract complicated operands. */
546
547/* The BA field in an XL form instruction when it must be the same as
548 the BT field in the same instruction. This operand is marked FAKE.
549 The insertion function just copies the BT field into the BA field,
550 and the extraction function just checks that the fields are the
551 same. */
552
553/*ARGSUSED*/
554static unsigned long
555insert_bat (unsigned long insn,
556 long value ATTRIBUTE_UNUSED,
557 int dialect ATTRIBUTE_UNUSED,
558 const char **errmsg ATTRIBUTE_UNUSED)
559{
560 return insn | (((insn >> 21) & 0x1f) << 16);
561}
562
563static long
564extract_bat (unsigned long insn,
565 int dialect ATTRIBUTE_UNUSED,
566 int *invalid)
567{
568 if (((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
569 *invalid = 1;
570 return 0;
571}
572
573/* The BB field in an XL form instruction when it must be the same as
574 the BA field in the same instruction. This operand is marked FAKE.
575 The insertion function just copies the BA field into the BB field,
576 and the extraction function just checks that the fields are the
577 same. */
578
579/*ARGSUSED*/
580static unsigned long
581insert_bba (unsigned long insn,
582 long value ATTRIBUTE_UNUSED,
583 int dialect ATTRIBUTE_UNUSED,
584 const char **errmsg ATTRIBUTE_UNUSED)
585{
586 return insn | (((insn >> 16) & 0x1f) << 11);
587}
588
589static long
590extract_bba (unsigned long insn,
591 int dialect ATTRIBUTE_UNUSED,
592 int *invalid)
593{
594 if (((insn >> 16) & 0x1f) != ((insn >> 11) & 0x1f))
595 *invalid = 1;
596 return 0;
597}
598
599/* The BD field in a B form instruction. The lower two bits are
600 forced to zero. */
601
602/*ARGSUSED*/
603static unsigned long
604insert_bd (unsigned long insn,
605 long value,
606 int dialect ATTRIBUTE_UNUSED,
607 const char **errmsg ATTRIBUTE_UNUSED)
608{
609 return insn | (value & 0xfffc);
610}
611
612/*ARGSUSED*/
613static long
614extract_bd (unsigned long insn,
615 int dialect ATTRIBUTE_UNUSED,
616 int *invalid ATTRIBUTE_UNUSED)
617{
618 return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
619}
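/*
 * Sign-extension idiom (editorial note): for a 16-bit field,
 * (x ^ 0x8000) - 0x8000 maps 0x0000..0x7fff to themselves and
 * 0x8000..0xffff to -0x8000..-1; e.g. the field 0xfffc becomes
 * 0x7ffc - 0x8000 = -4, recovering a backward branch displacement
 * without relying on implementation-defined signed shifts.
 */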
620
621/* The BD field in a B form instruction when the - modifier is used.
622 This modifier means that the branch is not expected to be taken.
623 For chips built to versions of the architecture prior to version 2
624 (ie. not Power4 compatible), we set the y bit of the BO field to 1
625 if the offset is negative. When extracting, we require that the y
626 bit be 1 and that the offset be positive, since if the y bit is 0
627 we just want to print the normal form of the instruction.
628 Power4 compatible targets use two bits, "a", and "t", instead of
629 the "y" bit. "at" == 00 => no hint, "at" == 01 => unpredictable,
630 "at" == 10 => not taken, "at" == 11 => taken. The "t" bit is 00001
631 in BO field, the "a" bit is 00010 for branch on CR(BI) and 01000
632 for branch on CTR. We only handle the taken/not-taken hint here. */
633
634/*ARGSUSED*/
635static unsigned long
636insert_bdm (unsigned long insn,
637 long value,
638 int dialect,
639 const char **errmsg ATTRIBUTE_UNUSED)
640{
641 if ((dialect & PPC_OPCODE_POWER4) == 0)
642 {
643 if ((value & 0x8000) != 0)
644 insn |= 1 << 21;
645 }
646 else
647 {
648 if ((insn & (0x14 << 21)) == (0x04 << 21))
649 insn |= 0x02 << 21;
650 else if ((insn & (0x14 << 21)) == (0x10 << 21))
651 insn |= 0x08 << 21;
652 }
653 return insn | (value & 0xfffc);
654}
655
656static long
657extract_bdm (unsigned long insn,
658 int dialect,
659 int *invalid)
660{
661 if ((dialect & PPC_OPCODE_POWER4) == 0)
662 {
663 if (((insn & (1 << 21)) == 0) != ((insn & (1 << 15)) == 0))
664 *invalid = 1;
665 }
666 else
667 {
668 if ((insn & (0x17 << 21)) != (0x06 << 21)
669 && (insn & (0x1d << 21)) != (0x18 << 21))
670 *invalid = 1;
671 }
672
673 return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
674}
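/*
 * Worked example (editorial, pre-POWER4 dialect): encoding "bne- target"
 * with a backward displacement of -8,
 *
 *	insn = insert_bdm(insn, -8, 0, &errmsg);
 *
 * -8 & 0x8000 is non-zero, so the y bit (1 << 21) is set: a backward
 * branch hinted not taken. extract_bdm() accepts only the matching
 * pairs (y set with a negative offset, y clear with a positive one)
 * and flags everything else invalid so the plain mnemonic is printed
 * instead.
 */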
675
676/* The BD field in a B form instruction when the + modifier is used.
677 This is like BDM, above, except that the branch is expected to be
678 taken. */
679
680/*ARGSUSED*/
681static unsigned long
682insert_bdp (unsigned long insn,
683 long value,
684 int dialect,
685 const char **errmsg ATTRIBUTE_UNUSED)
686{
687 if ((dialect & PPC_OPCODE_POWER4) == 0)
688 {
689 if ((value & 0x8000) == 0)
690 insn |= 1 << 21;
691 }
692 else
693 {
694 if ((insn & (0x14 << 21)) == (0x04 << 21))
695 insn |= 0x03 << 21;
696 else if ((insn & (0x14 << 21)) == (0x10 << 21))
697 insn |= 0x09 << 21;
698 }
699 return insn | (value & 0xfffc);
700}
701
702static long
703extract_bdp (unsigned long insn,
704 int dialect,
705 int *invalid)
706{
707 if ((dialect & PPC_OPCODE_POWER4) == 0)
708 {
709 if (((insn & (1 << 21)) == 0) == ((insn & (1 << 15)) == 0))
710 *invalid = 1;
711 }
712 else
713 {
714 if ((insn & (0x17 << 21)) != (0x07 << 21)
715 && (insn & (0x1d << 21)) != (0x19 << 21))
716 *invalid = 1;
717 }
718
719 return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
720}
721
722/* Check for legal values of a BO field. */
723
724static int
725valid_bo (long value, int dialect)
726{
727 if ((dialect & PPC_OPCODE_POWER4) == 0)
728 {
729 /* Certain encodings have bits that are required to be zero.
730 These are (z must be zero, y may be anything):
731 001zy
732 011zy
733 1z00y
734 1z01y
735 1z1zz
736 */
737 switch (value & 0x14)
738 {
739 default:
740 case 0:
741 return 1;
742 case 0x4:
743 return (value & 0x2) == 0;
744 case 0x10:
745 return (value & 0x8) == 0;
746 case 0x14:
747 return value == 0x14;
748 }
749 }
750 else
751 {
752 /* Certain encodings have bits that are required to be zero.
753 These are (z must be zero, a & t may be anything):
754 0000z
755 0001z
756 0100z
757 0101z
758 001at
759 011at
760 1a00t
761 1a01t
762 1z1zz
763 */
764 if ((value & 0x14) == 0)
765 return (value & 0x1) == 0;
766 else if ((value & 0x14) == 0x14)
767 return value == 0x14;
768 else
769 return 1;
770 }
771}
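/*
 * Sanity examples (editorial, pre-POWER4 dialect): valid_bo(0x14, 0)
 * is 1 ("branch always", the 1z1zz pattern with all z bits zero);
 * valid_bo(0x16, 0) is 0 (a z bit set in 1z1zz); and valid_bo(0x06, 0)
 * is 0 (001zy with the z bit set).
 */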
772
773/* The BO field in a B form instruction. Warn about attempts to set
774 the field to an illegal value. */
775
776static unsigned long
777insert_bo (unsigned long insn,
778 long value,
779 int dialect,
780 const char **errmsg)
781{
782 if (!valid_bo (value, dialect))
783 *errmsg = _("invalid conditional option");
784 return insn | ((value & 0x1f) << 21);
785}
786
787static long
788extract_bo (unsigned long insn,
789 int dialect,
790 int *invalid)
791{
792 long value;
793
794 value = (insn >> 21) & 0x1f;
795 if (!valid_bo (value, dialect))
796 *invalid = 1;
797 return value;
798}
799
800/* The BO field in a B form instruction when the + or - modifier is
801 used. This is like the BO field, but it must be even. When
802 extracting it, we force it to be even. */
803
804static unsigned long
805insert_boe (unsigned long insn,
806 long value,
807 int dialect,
808 const char **errmsg)
809{
810 if (!valid_bo (value, dialect))
811 *errmsg = _("invalid conditional option");
812 else if ((value & 1) != 0)
813 *errmsg = _("attempt to set y bit when using + or - modifier");
814
815 return insn | ((value & 0x1f) << 21);
816}
817
818static long
819extract_boe (unsigned long insn,
820 int dialect,
821 int *invalid)
822{
823 long value;
824
825 value = (insn >> 21) & 0x1f;
826 if (!valid_bo (value, dialect))
827 *invalid = 1;
828 return value & 0x1e;
829}
830
831/* The DQ field in a DQ form instruction. This is like D, but the
832 lower four bits are forced to zero. */
833
834/*ARGSUSED*/
835static unsigned long
836insert_dq (unsigned long insn,
837 long value,
838 int dialect ATTRIBUTE_UNUSED,
839 const char **errmsg)
840{
841 if ((value & 0xf) != 0)
842 *errmsg = _("offset not a multiple of 16");
843 return insn | (value & 0xfff0);
844}
845
846/*ARGSUSED*/
847static long
848extract_dq (unsigned long insn,
849 int dialect ATTRIBUTE_UNUSED,
850 int *invalid ATTRIBUTE_UNUSED)
851{
852 return ((insn & 0xfff0) ^ 0x8000) - 0x8000;
853}
854
855static unsigned long
856insert_ev2 (unsigned long insn,
857 long value,
858 int dialect ATTRIBUTE_UNUSED,
859 const char **errmsg)
860{
861 if ((value & 1) != 0)
862 *errmsg = _("offset not a multiple of 2");
863 if (value > 62)
864 *errmsg = _("offset greater than 62");
865 return insn | ((value & 0x3e) << 10);
866}
867
868static long
869extract_ev2 (unsigned long insn,
870 int dialect ATTRIBUTE_UNUSED,
871 int *invalid ATTRIBUTE_UNUSED)
872{
873 return (insn >> 10) & 0x3e;
874}
875
876static unsigned long
877insert_ev4 (unsigned long insn,
878 long value,
879 int dialect ATTRIBUTE_UNUSED,
880 const char **errmsg)
881{
882 if ((value & 3) != 0)
883 *errmsg = _("offset not a multiple of 4");
884 if (value > 124)
885 *errmsg = _("offset greater than 124");
886 return insn | ((value & 0x7c) << 9);
887}
888
889static long
890extract_ev4 (unsigned long insn,
891 int dialect ATTRIBUTE_UNUSED,
892 int *invalid ATTRIBUTE_UNUSED)
893{
894 return (insn >> 9) & 0x7c;
895}
896
897static unsigned long
898insert_ev8 (unsigned long insn,
899 long value,
900 int dialect ATTRIBUTE_UNUSED,
901 const char **errmsg)
902{
903 if ((value & 7) != 0)
904 *errmsg = _("offset not a multiple of 8");
905 if (value > 248)
906 *errmsg = _("offset greater than 248");
907 return insn | ((value & 0xf8) << 8);
908}
909
910static long
911extract_ev8 (unsigned long insn,
912 int dialect ATTRIBUTE_UNUSED,
913 int *invalid ATTRIBUTE_UNUSED)
914{
915 return (insn >> 8) & 0xf8;
916}
917
918/* The DS field in a DS form instruction. This is like D, but the
919 lower two bits are forced to zero. */
920
921/*ARGSUSED*/
922static unsigned long
923insert_ds (unsigned long insn,
924 long value,
925 int dialect ATTRIBUTE_UNUSED,
926 const char **errmsg)
927{
928 if ((value & 3) != 0)
929 *errmsg = _("offset not a multiple of 4");
930 return insn | (value & 0xfffc);
931}
932
933/*ARGSUSED*/
934static long
935extract_ds (unsigned long insn,
936 int dialect ATTRIBUTE_UNUSED,
937 int *invalid ATTRIBUTE_UNUSED)
938{
939 return ((insn & 0xfffc) ^ 0x8000) - 0x8000;
940}
941
942/* The DE field in a DE form instruction. */
943
944/*ARGSUSED*/
945static unsigned long
946insert_de (unsigned long insn,
947 long value,
948 int dialect ATTRIBUTE_UNUSED,
949 const char **errmsg)
950{
951 if (value > 2047 || value < -2048)
952 *errmsg = _("offset not between -2048 and 2047");
953 return insn | ((value << 4) & 0xfff0);
954}
955
956/*ARGSUSED*/
957static long
958extract_de (unsigned long insn,
959 int dialect ATTRIBUTE_UNUSED,
960 int *invalid ATTRIBUTE_UNUSED)
961{
962 return (insn & 0xfff0) >> 4;
963}
964
965/* The DES field in a DES form instruction. */
966
967/*ARGSUSED*/
968static unsigned long
969insert_des (unsigned long insn,
970 long value,
971 int dialect ATTRIBUTE_UNUSED,
972 const char **errmsg)
973{
974 if (value > 8191 || value < -8192)
975 *errmsg = _("offset not between -8192 and 8191");
976 else if ((value & 3) != 0)
977 *errmsg = _("offset not a multiple of 4");
978 return insn | ((value << 2) & 0xfff0);
979}
980
981/*ARGSUSED*/
982static long
983extract_des (unsigned long insn,
984 int dialect ATTRIBUTE_UNUSED,
985 int *invalid ATTRIBUTE_UNUSED)
986{
987 return (((insn >> 2) & 0x3ffc) ^ 0x2000) - 0x2000;
988}
989
990/* FXM mask in mfcr and mtcrf instructions. */
991
992static unsigned long
993insert_fxm (unsigned long insn,
994 long value,
995 int dialect,
996 const char **errmsg)
997{
998 /* If the optional field on mfcr is missing that means we want to use
999 the old form of the instruction that moves the whole cr. In that
1000 case we'll have VALUE zero. There doesn't seem to be a way to
1001 distinguish this from the case where someone writes mfcr %r3,0. */
1002 if (value == 0)
1003 ;
1004
1005 /* If only one bit of the FXM field is set, we can use the new form
1006 of the instruction, which is faster. Unlike the Power4 branch hint
1007 encoding, this is not backward compatible. */
1008 else if ((dialect & PPC_OPCODE_POWER4) != 0 && (value & -value) == value)
1009 insn |= 1 << 20;
1010
1011 /* Any other value on mfcr is an error. */
1012 else if ((insn & (0x3ff << 1)) == 19 << 1)
1013 {
1014 *errmsg = _("ignoring invalid mfcr mask");
1015 value = 0;
1016 }
1017
1018 return insn | ((value & 0xff) << 12);
1019}
1020
1021static long
1022extract_fxm (unsigned long insn,
1023 int dialect,
1024 int *invalid)
1025{
1026 long mask = (insn >> 12) & 0xff;
1027
1028 /* Is this a Power4 insn? */
1029 if ((insn & (1 << 20)) != 0)
1030 {
1031 if ((dialect & PPC_OPCODE_POWER4) == 0)
1032 *invalid = 1;
1033 else
1034 {
1035 /* Exactly one bit of MASK should be set. */
1036 if (mask == 0 || (mask & -mask) != mask)
1037 *invalid = 1;
1038 }
1039 }
1040
1041 /* Check that non-power4 form of mfcr has a zero MASK. */
1042 else if ((insn & (0x3ff << 1)) == 19 << 1)
1043 {
1044 if (mask != 0)
1045 *invalid = 1;
1046 }
1047
1048 return mask;
1049}
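/*
 * Worked example (editorial): "mtcrf 0x80,r3" touches only cr0, so
 * with a POWER4 dialect insert_fxm() sees 0x80 & -0x80 == 0x80 (a
 * single mask bit) and sets bit 20, selecting the one-field form;
 * extract_fxm() conversely requires exactly one mask bit whenever bit
 * 20 is set, and a zero mask for the traditional whole-CR mfcr.
 */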
1050
1051/* The LI field in an I form instruction. The lower two bits are
1052 forced to zero. */
1053
1054/*ARGSUSED*/
1055static unsigned long
1056insert_li (unsigned long insn,
1057 long value,
1058 int dialect ATTRIBUTE_UNUSED,
1059 const char **errmsg)
1060{
1061 if ((value & 3) != 0)
1062 *errmsg = _("ignoring least significant bits in branch offset");
1063 return insn | (value & 0x3fffffc);
1064}
1065
1066/*ARGSUSED*/
1067static long
1068extract_li (unsigned long insn,
1069 int dialect ATTRIBUTE_UNUSED,
1070 int *invalid ATTRIBUTE_UNUSED)
1071{
1072 return ((insn & 0x3fffffc) ^ 0x2000000) - 0x2000000;
1073}
1074
1075/* The MB and ME fields in an M form instruction expressed as a single
1076 operand which is itself a bitmask. The extraction function always
1077 marks it as invalid, since we never want to recognize an
1078 instruction which uses a field of this type. */
1079
1080static unsigned long
1081insert_mbe (unsigned long insn,
1082 long value,
1083 int dialect ATTRIBUTE_UNUSED,
1084 const char **errmsg)
1085{
1086 unsigned long uval, mask;
1087 int mb, me, mx, count, last;
1088
1089 uval = value;
1090
1091 if (uval == 0)
1092 {
1093 *errmsg = _("illegal bitmask");
1094 return insn;
1095 }
1096
1097 mb = 0;
1098 me = 32;
1099 if ((uval & 1) != 0)
1100 last = 1;
1101 else
1102 last = 0;
1103 count = 0;
1104
1105 /* mb: location of last 0->1 transition */
1106 /* me: location of last 1->0 transition */
1107 /* count: # transitions */
1108
1109 for (mx = 0, mask = 1L << 31; mx < 32; ++mx, mask >>= 1)
1110 {
1111 if ((uval & mask) && !last)
1112 {
1113 ++count;
1114 mb = mx;
1115 last = 1;
1116 }
1117 else if (!(uval & mask) && last)
1118 {
1119 ++count;
1120 me = mx;
1121 last = 0;
1122 }
1123 }
1124 if (me == 0)
1125 me = 32;
1126
1127 if (count != 2 && (count != 0 || ! last))
1128 *errmsg = _("illegal bitmask");
1129
1130 return insn | (mb << 6) | ((me - 1) << 1);
1131}
1132
1133static long
1134extract_mbe (unsigned long insn,
1135 int dialect ATTRIBUTE_UNUSED,
1136 int *invalid)
1137{
1138 long ret;
1139 int mb, me;
1140 int i;
1141
1142 *invalid = 1;
1143
1144 mb = (insn >> 6) & 0x1f;
1145 me = (insn >> 1) & 0x1f;
1146 if (mb < me + 1)
1147 {
1148 ret = 0;
1149 for (i = mb; i <= me; i++)
1150 ret |= 1L << (31 - i);
1151 }
1152 else if (mb == me + 1)
1153 ret = ~0;
1154 else /* (mb > me + 1) */
1155 {
1156 ret = ~0;
1157 for (i = me + 1; i < mb; i++)
1158 ret &= ~(1L << (31 - i));
1159 }
1160 return ret;
1161}
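/*
 * Worked example (editorial): the mask 0x00ffff00 has its only 0->1
 * transition at IBM bit 8 and its 1->0 transition at IBM bit 24, so
 * count == 2 and insert_mbe() stores MB = 8, ME = 23 (me - 1), the
 * encoding of "rlwinm rD,rS,sh,8,23"; a mask with more than one run
 * of 1 bits pushes count past 2 and draws the "illegal bitmask" error.
 */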
1162
1163/* The MB or ME field in an MD or MDS form instruction. The high bit
1164 is wrapped to the low end. */
1165
1166/*ARGSUSED*/
1167static unsigned long
1168insert_mb6 (unsigned long insn,
1169 long value,
1170 int dialect ATTRIBUTE_UNUSED,
1171 const char **errmsg ATTRIBUTE_UNUSED)
1172{
1173 return insn | ((value & 0x1f) << 6) | (value & 0x20);
1174}
1175
1176/*ARGSUSED*/
1177static long
1178extract_mb6 (unsigned long insn,
1179 int dialect ATTRIBUTE_UNUSED,
1180 int *invalid ATTRIBUTE_UNUSED)
1181{
1182 return ((insn >> 6) & 0x1f) | (insn & 0x20);
1183}
1184
1185/* The NB field in an X form instruction. The value 32 is stored as
1186 0. */
1187
1188static unsigned long
1189insert_nb (unsigned long insn,
1190 long value,
1191 int dialect ATTRIBUTE_UNUSED,
1192 const char **errmsg)
1193{
1194 if (value < 0 || value > 32)
1195 *errmsg = _("value out of range");
1196 if (value == 32)
1197 value = 0;
1198 return insn | ((value & 0x1f) << 11);
1199}
1200
1201/*ARGSUSED*/
1202static long
1203extract_nb (unsigned long insn,
1204 int dialect ATTRIBUTE_UNUSED,
1205 int *invalid ATTRIBUTE_UNUSED)
1206{
1207 long ret;
1208
1209 ret = (insn >> 11) & 0x1f;
1210 if (ret == 0)
1211 ret = 32;
1212 return ret;
1213}
1214
1215/* The NSI field in a D form instruction. This is the same as the SI
1216 field, only negated. The extraction function always marks it as
1217 invalid, since we never want to recognize an instruction which uses
1218 a field of this type. */
1219
1220/*ARGSUSED*/
1221static unsigned long
1222insert_nsi (unsigned long insn,
1223 long value,
1224 int dialect ATTRIBUTE_UNUSED,
1225 const char **errmsg ATTRIBUTE_UNUSED)
1226{
1227 return insn | (-value & 0xffff);
1228}
1229
1230static long
1231extract_nsi (unsigned long insn,
1232 int dialect ATTRIBUTE_UNUSED,
1233 int *invalid)
1234{
1235 *invalid = 1;
1236 return -(((insn & 0xffff) ^ 0x8000) - 0x8000);
1237}
1238
1239/* The RA field in a D or X form instruction which is an updating
1240 load, which means that the RA field may not be zero and may not
1241 equal the RT field. */
1242
1243static unsigned long
1244insert_ral (unsigned long insn,
1245 long value,
1246 int dialect ATTRIBUTE_UNUSED,
1247 const char **errmsg)
1248{
1249 if (value == 0
1250 || (unsigned long) value == ((insn >> 21) & 0x1f))
1251 *errmsg = _("invalid register operand when updating");
1252 return insn | ((value & 0x1f) << 16);
1253}
1254
1255/* The RA field in an lmw instruction, which has special value
1256 restrictions. */
1257
1258static unsigned long
1259insert_ram (unsigned long insn,
1260 long value,
1261 int dialect ATTRIBUTE_UNUSED,
1262 const char **errmsg)
1263{
1264 if ((unsigned long) value >= ((insn >> 21) & 0x1f))
1265 *errmsg = _("index register in load range");
1266 return insn | ((value & 0x1f) << 16);
1267}
1268
1269/* The RA field in the DQ form lq instruction, which has special
1270 value restrictions. */
1271
1272/*ARGSUSED*/
1273static unsigned long
1274insert_raq (unsigned long insn,
1275 long value,
1276 int dialect ATTRIBUTE_UNUSED,
1277 const char **errmsg)
1278{
1279 long rtvalue = (insn & RT_MASK) >> 21;
1280
1281 if (value == rtvalue)
1282 *errmsg = _("source and target register operands must be different");
1283 return insn | ((value & 0x1f) << 16);
1284}
1285
1286/* The RA field in a D or X form instruction which is an updating
1287 store or an updating floating point load, which means that the RA
1288 field may not be zero. */
1289
1290static unsigned long
1291insert_ras (unsigned long insn,
1292 long value,
1293 int dialect ATTRIBUTE_UNUSED,
1294 const char **errmsg)
1295{
1296 if (value == 0)
1297 *errmsg = _("invalid register operand when updating");
1298 return insn | ((value & 0x1f) << 16);
1299}
1300
1301/* The RB field in an X form instruction when it must be the same as
1302 the RS field in the instruction. This is used for extended
1303 mnemonics like mr. This operand is marked FAKE. The insertion
1304 function just copies the RS field into the RB field, and the
1305 extraction function just checks that the fields are the same. */
1306
1307/*ARGSUSED*/
1308static unsigned long
1309insert_rbs (unsigned long insn,
1310 long value ATTRIBUTE_UNUSED,
1311 int dialect ATTRIBUTE_UNUSED,
1312 const char **errmsg ATTRIBUTE_UNUSED)
1313{
1314 return insn | (((insn >> 21) & 0x1f) << 11);
1315}
1316
1317static long
1318extract_rbs (unsigned long insn,
1319 int dialect ATTRIBUTE_UNUSED,
1320 int *invalid)
1321{
1322 if (((insn >> 21) & 0x1f) != ((insn >> 11) & 0x1f))
1323 *invalid = 1;
1324 return 0;
1325}
1326
1327/* The RT field of the DQ form lq instruction, which has special
1328 value restrictions. */
1329
1330/*ARGSUSED*/
1331static unsigned long
1332insert_rtq (unsigned long insn,
1333 long value,
1334 int dialect ATTRIBUTE_UNUSED,
1335 const char **errmsg)
1336{
1337 if ((value & 1) != 0)
1338 *errmsg = _("target register operand must be even");
1339 return insn | ((value & 0x1f) << 21);
1340}
1341
1342/* The RS field of the DS form stq instruction, which has special
1343 value restrictions. */
1344
1345/*ARGSUSED*/
1346static unsigned long
1347insert_rsq (unsigned long insn,
1348 long value ATTRIBUTE_UNUSED,
1349 int dialect ATTRIBUTE_UNUSED,
1350 const char **errmsg)
1351{
1352 if ((value & 1) != 0)
1353 *errmsg = _("source register operand must be even");
1354 return insn | ((value & 0x1f) << 21);
1355}
1356
1357/* The SH field in an MD form instruction. This is split. */
1358
1359/*ARGSUSED*/
1360static unsigned long
1361insert_sh6 (unsigned long insn,
1362 long value,
1363 int dialect ATTRIBUTE_UNUSED,
1364 const char **errmsg ATTRIBUTE_UNUSED)
1365{
1366 return insn | ((value & 0x1f) << 11) | ((value & 0x20) >> 4);
1367}
1368
1369/*ARGSUSED*/
1370static long
1371extract_sh6 (unsigned long insn,
1372 int dialect ATTRIBUTE_UNUSED,
1373 int *invalid ATTRIBUTE_UNUSED)
1374{
1375 return ((insn >> 11) & 0x1f) | ((insn << 4) & 0x20);
1376}
1377
1378/* The SPR field in an XFX form instruction. This is flipped--the
1379 lower 5 bits are stored in the upper 5 and vice versa. */
1380
1381static unsigned long
1382insert_spr (unsigned long insn,
1383 long value,
1384 int dialect ATTRIBUTE_UNUSED,
1385 const char **errmsg ATTRIBUTE_UNUSED)
1386{
1387 return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
1388}
1389
1390static long
1391extract_spr (unsigned long insn,
1392 int dialect ATTRIBUTE_UNUSED,
1393 int *invalid ATTRIBUTE_UNUSED)
1394{
1395 return ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
1396}
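/*
 * Worked example (editorial): SPR 268 (TB, the time base) is
 * 0b0100001100; insert_spr() places the low five bits (0b01100 = 12)
 * at bit 16 and the high five (0b01000 = 8) at bit 11, so the SPR
 * number really is stored "back to front" in mfspr/mftb encodings.
 */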
1397
1398/* The TBR field in an XFX instruction. This is just like SPR, but it
1399 is optional. When TBR is omitted, it must be inserted as 268 (the
1400 magic number of the TB register). These functions treat 0
1401 (indicating an omitted optional operand) as 268. This means that
1402 ``mftb 4,0'' is not handled correctly. This does not matter very
1403 much, since the architecture manual does not define mftb as
1404 accepting any values other than 268 or 269. */
1405
1406#define TB (268)
1407
1408static unsigned long
1409insert_tbr (unsigned long insn,
1410 long value,
1411 int dialect ATTRIBUTE_UNUSED,
1412 const char **errmsg ATTRIBUTE_UNUSED)
1413{
1414 if (value == 0)
1415 value = TB;
1416 return insn | ((value & 0x1f) << 16) | ((value & 0x3e0) << 6);
1417}
1418
1419static long
1420extract_tbr (unsigned long insn,
1421 int dialect ATTRIBUTE_UNUSED,
1422 int *invalid ATTRIBUTE_UNUSED)
1423{
1424 long ret;
1425
1426 ret = ((insn >> 16) & 0x1f) | ((insn >> 6) & 0x3e0);
1427 if (ret == TB)
1428 ret = 0;
1429 return ret;
1430}
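
/* Example: ``mftb 4'' assembles with the TBR operand omitted, so
   insert_tbr is handed 0 and encodes 268; extract_tbr maps 268 back
   to 0 so the operand is omitted again on disassembly.  */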
1431
1432/* Macros used to form opcodes. */
1433
1434/* The main opcode. */
1435#define OP(x) ((((unsigned long)(x)) & 0x3f) << 26)
1436#define OP_MASK OP (0x3f)
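/* For example, OP (31) == 0x7c000000, the major opcode under which
   most of the X form fixed-point instructions below are grouped.  */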
1437
1438/* The main opcode combined with a trap code in the TO field of a D
1439 form instruction. Used for extended mnemonics for the trap
1440 instructions. */
1441#define OPTO(x,to) (OP (x) | ((((unsigned long)(to)) & 0x1f) << 21))
1442#define OPTO_MASK (OP_MASK | TO_MASK)
1443
1444/* The main opcode combined with a comparison size bit in the L field
1445 of a D form or X form instruction. Used for extended mnemonics for
1446 the comparison instructions. */
1447#define OPL(x,l) (OP (x) | ((((unsigned long)(l)) & 1) << 21))
1448#define OPL_MASK OPL (0x3f,1)
1449
1450/* An A form instruction. */
1451#define A(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1) | (((unsigned long)(rc)) & 1))
1452#define A_MASK A (0x3f, 0x1f, 1)
1453
1454/* An A_MASK with the FRB field fixed. */
1455#define AFRB_MASK (A_MASK | FRB_MASK)
1456
1457/* An A_MASK with the FRC field fixed. */
1458#define AFRC_MASK (A_MASK | FRC_MASK)
1459
1460/* An A_MASK with the FRA and FRC fields fixed. */
1461#define AFRAFRC_MASK (A_MASK | FRA_MASK | FRC_MASK)
1462
1463/* A B form instruction. */
1464#define B(op, aa, lk) (OP (op) | ((((unsigned long)(aa)) & 1) << 1) | ((lk) & 1))
1465#define B_MASK B (0x3f, 1, 1)
1466
1467/* A B form instruction setting the BO field. */
1468#define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
1469#define BBO_MASK BBO (0x3f, 0x1f, 1, 1)
1470
1471/* A BBO_MASK with the y bit of the BO field removed. This permits
1472 matching a conditional branch regardless of the setting of the y
1473 bit. Similarly for the 'at' bits used for power4 branch hints. */
1474#define Y_MASK (((unsigned long) 1) << 21)
1475#define AT1_MASK (((unsigned long) 3) << 21)
1476#define AT2_MASK (((unsigned long) 9) << 21)
1477#define BBOY_MASK (BBO_MASK &~ Y_MASK)
1478#define BBOAT_MASK (BBO_MASK &~ AT1_MASK)
1479
1480/* A B form instruction setting the BO field and the condition bits of
1481 the BI field. */
1482#define BBOCB(op, bo, cb, aa, lk) \
1483 (BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16))
1484#define BBOCB_MASK BBOCB (0x3f, 0x1f, 0x3, 1, 1)
1485
1486/* A BBOCB_MASK with the y bit of the BO field removed. */
1487#define BBOYCB_MASK (BBOCB_MASK &~ Y_MASK)
1488#define BBOATCB_MASK (BBOCB_MASK &~ AT1_MASK)
1489#define BBOAT2CB_MASK (BBOCB_MASK &~ AT2_MASK)
1490
1491/* A BBOYCB_MASK in which the BI field is fixed. */
1492#define BBOYBI_MASK (BBOYCB_MASK | BI_MASK)
1493#define BBOATBI_MASK (BBOAT2CB_MASK | BI_MASK)
1494
1495/* A Context form instruction. */
1496#define CTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7))
1497#define CTX_MASK CTX(0x3f, 0x7)
1498
1499/* A User Context form instruction. */
1500#define UCTX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
1501#define UCTX_MASK UCTX(0x3f, 0x1f)
1502
1503/* The main opcode mask with the RA field clear. */
1504#define DRA_MASK (OP_MASK | RA_MASK)
1505
1506/* A DS form instruction. */
1507#define DSO(op, xop) (OP (op) | ((xop) & 0x3))
1508#define DS_MASK DSO (0x3f, 3)
1509
1510/* A DE form instruction. */
1511#define DEO(op, xop) (OP (op) | ((xop) & 0xf))
1512#define DE_MASK DEO (0x3e, 0xf)
1513
1514/* An EVSEL form instruction. */
1515#define EVSEL(op, xop) (OP (op) | (((unsigned long)(xop)) & 0xff) << 3)
1516#define EVSEL_MASK EVSEL(0x3f, 0xff)
1517
1518/* An M form instruction. */
1519#define M(op, rc) (OP (op) | ((rc) & 1))
1520#define M_MASK M (0x3f, 1)
1521
1522/* An M form instruction with the ME field specified. */
1523#define MME(op, me, rc) (M ((op), (rc)) | ((((unsigned long)(me)) & 0x1f) << 1))
1524
1525/* An M_MASK with the MB and ME fields fixed. */
1526#define MMBME_MASK (M_MASK | MB_MASK | ME_MASK)
1527
1528/* An M_MASK with the SH and ME fields fixed. */
1529#define MSHME_MASK (M_MASK | SH_MASK | ME_MASK)
1530
1531/* An MD form instruction. */
1532#define MD(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x7) << 2) | ((rc) & 1))
1533#define MD_MASK MD (0x3f, 0x7, 1)
1534
1535/* An MD_MASK with the MB field fixed. */
1536#define MDMB_MASK (MD_MASK | MB6_MASK)
1537
1538/* An MD_MASK with the SH field fixed. */
1539#define MDSH_MASK (MD_MASK | SH6_MASK)
1540
1541/* An MDS form instruction. */
1542#define MDS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0xf) << 1) | ((rc) & 1))
1543#define MDS_MASK MDS (0x3f, 0xf, 1)
1544
1545/* An MDS_MASK with the MB field fixed. */
1546#define MDSMB_MASK (MDS_MASK | MB6_MASK)
1547
1548/* An SC form instruction. */
1549#define SC(op, sa, lk) (OP (op) | ((((unsigned long)(sa)) & 1) << 1) | ((lk) & 1))
1550#define SC_MASK (OP_MASK | (((unsigned long)0x3ff) << 16) | (((unsigned long)1) << 1) | 1)
1551
1552/* A VX form instruction. */
1553#define VX(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x7ff))
1554
1555/* The mask for a VX form instruction. */
1556#define VX_MASK VX(0x3f, 0x7ff)
1557
1558/* A VA form instruction. */
1559#define VXA(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x03f))
1560
1561/* The mask for a VA form instruction. */
1562#define VXA_MASK VXA(0x3f, 0x3f)
1563
1564/* A VXR form instruction. */
1565#define VXR(op, xop, rc) (OP (op) | (((rc) & 1) << 10) | (((unsigned long)(xop)) & 0x3ff))
1566
1567/* The mask for a VXR form instruction. */
1568#define VXR_MASK VXR(0x3f, 0x3ff, 1)
1569
1570/* An X form instruction. */
1571#define X(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
1572
1573/* An X form instruction with the RC bit specified. */
1574#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
1575
1576/* The mask for an X form instruction. */
1577#define X_MASK XRC (0x3f, 0x3ff, 1)
1578
1579/* An X_MASK with the RA field fixed. */
1580#define XRA_MASK (X_MASK | RA_MASK)
1581
1582/* An X_MASK with the RB field fixed. */
1583#define XRB_MASK (X_MASK | RB_MASK)
1584
1585/* An X_MASK with the RT field fixed. */
1586#define XRT_MASK (X_MASK | RT_MASK)
1587
1588/* An X_MASK with the RA and RB fields fixed. */
1589#define XRARB_MASK (X_MASK | RA_MASK | RB_MASK)
1590
1591/* An XRARB_MASK, but with the L bit clear. */
1592#define XRLARB_MASK (XRARB_MASK & ~((unsigned long) 1 << 16))
1593
1594/* An X_MASK with the RT and RA fields fixed. */
1595#define XRTRA_MASK (X_MASK | RT_MASK | RA_MASK)
1596
1597/* An XRTRA_MASK, but with the L bit clear. */
1598#define XRTLRA_MASK (XRTRA_MASK & ~((unsigned long) 1 << 21))
1599
1600/* An X form comparison instruction. */
1601#define XCMPL(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 1) << 21))
1602
1603/* The mask for an X form comparison instruction. */
1604#define XCMP_MASK (X_MASK | (((unsigned long)1) << 22))
1605
1606/* The mask for an X form comparison instruction with the L field
1607 fixed. */
1608#define XCMPL_MASK (XCMP_MASK | (((unsigned long)1) << 21))
1609
1610/* An X form trap instruction with the TO field specified. */
1611#define XTO(op, xop, to) (X ((op), (xop)) | ((((unsigned long)(to)) & 0x1f) << 21))
1612#define XTO_MASK (X_MASK | TO_MASK)
1613
1614/* An X form tlb instruction with the SH field specified. */
1615#define XTLB(op, xop, sh) (X ((op), (xop)) | ((((unsigned long)(sh)) & 0x1f) << 11))
1616#define XTLB_MASK (X_MASK | SH_MASK)
1617
1618/* An X form sync instruction. */
1619#define XSYNC(op, xop, l) (X ((op), (xop)) | ((((unsigned long)(l)) & 3) << 21))
1620
1621/* An X form sync instruction with everything filled in except the LS field. */
1622#define XSYNC_MASK (0xff9fffff)
1623
1624/* An X form AltiVec dss instruction. */
1625#define XDSS(op, xop, a) (X ((op), (xop)) | ((((unsigned long)(a)) & 1) << 25))
1626#define XDSS_MASK XDSS(0x3f, 0x3ff, 1)
1627
1628/* An XFL form instruction. */
1629#define XFL(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1) | (((unsigned long)(rc)) & 1))
1630#define XFL_MASK (XFL (0x3f, 0x3ff, 1) | (((unsigned long)1) << 25) | (((unsigned long)1) << 16))
1631
1632/* An X form isel instruction. */
1633#define XISEL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x1f) << 1))
1634#define XISEL_MASK XISEL(0x3f, 0x1f)
1635
1636/* An XL form instruction with the LK field set to 0. */
1637#define XL(op, xop) (OP (op) | ((((unsigned long)(xop)) & 0x3ff) << 1))
1638
1639/* An XL form instruction which uses the LK field. */
1640#define XLLK(op, xop, lk) (XL ((op), (xop)) | ((lk) & 1))
1641
1642/* The mask for an XL form instruction. */
1643#define XL_MASK XLLK (0x3f, 0x3ff, 1)
1644
1645/* An XL form instruction which explicitly sets the BO field. */
1646#define XLO(op, bo, xop, lk) \
1647 (XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
1648#define XLO_MASK (XL_MASK | BO_MASK)
1649
1650/* An XL form instruction which explicitly sets the y bit of the BO
1651 field. */
1652#define XLYLK(op, xop, y, lk) (XLLK ((op), (xop), (lk)) | ((((unsigned long)(y)) & 1) << 21))
1653#define XLYLK_MASK (XL_MASK | Y_MASK)
1654
1655/* An XL form instruction which sets the BO field and the condition
1656 bits of the BI field. */
1657#define XLOCB(op, bo, cb, xop, lk) \
1658 (XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16))
1659#define XLOCB_MASK XLOCB (0x3f, 0x1f, 0x3, 0x3ff, 1)
1660
1661/* An XL_MASK or XLYLK_MASK or XLOCB_MASK with the BB field fixed. */
1662#define XLBB_MASK (XL_MASK | BB_MASK)
1663#define XLYBB_MASK (XLYLK_MASK | BB_MASK)
1664#define XLBOCBBB_MASK (XLOCB_MASK | BB_MASK)
1665
1666/* An XL_MASK with the BO and BB fields fixed. */
1667#define XLBOBB_MASK (XL_MASK | BO_MASK | BB_MASK)
1668
1669/* An XL_MASK with the BO, BI and BB fields fixed. */
1670#define XLBOBIBB_MASK (XL_MASK | BO_MASK | BI_MASK | BB_MASK)
1671
1672/* An XO form instruction. */
1673#define XO(op, xop, oe, rc) \
1674 (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1))
1675#define XO_MASK XO (0x3f, 0x1ff, 1, 1)
1676
1677/* An XO_MASK with the RB field fixed. */
1678#define XORB_MASK (XO_MASK | RB_MASK)
1679
1680/* An XS form instruction. */
1681#define XS(op, xop, rc) (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 2) | (((unsigned long)(rc)) & 1))
1682#define XS_MASK XS (0x3f, 0x1ff, 1)
1683
1684/* A mask for the FXM version of an XFX form instruction. */
1685#define XFXFXM_MASK (X_MASK | (1 << 11))
1686
1687/* An XFX form instruction with the FXM field filled in. */
1688#define XFXM(op, xop, fxm) \
1689 (X ((op), (xop)) | ((((unsigned long)(fxm)) & 0xff) << 12))
1690
1691/* An XFX form instruction with the SPR field filled in. */
1692#define XSPR(op, xop, spr) \
1693 (X ((op), (xop)) | ((((unsigned long)(spr)) & 0x1f) << 16) | ((((unsigned long)(spr)) & 0x3e0) << 6))
1694#define XSPR_MASK (X_MASK | SPR_MASK)
1695
1696/* An XFX form instruction with the SPR field filled in except for the
1697 SPRBAT field. */
1698#define XSPRBAT_MASK (XSPR_MASK &~ SPRBAT_MASK)
1699
1700/* An XFX form instruction with the SPR field filled in except for the
1701 SPRG field. */
1702#define XSPRG_MASK (XSPR_MASK &~ SPRG_MASK)
1703
1704/* An X form instruction with everything filled in except the E field. */
1705#define XE_MASK (0xffff7fff)
1706
1707/* An X form user context instruction. */
1708#define XUC(op, xop) (OP (op) | (((unsigned long)(xop)) & 0x1f))
1709#define XUC_MASK XUC(0x3f, 0x1f)
1710
1711/* The BO encodings used in extended conditional branch mnemonics. */
1712#define BODNZF (0x0)
1713#define BODNZFP (0x1)
1714#define BODZF (0x2)
1715#define BODZFP (0x3)
1716#define BODNZT (0x8)
1717#define BODNZTP (0x9)
1718#define BODZT (0xa)
1719#define BODZTP (0xb)
1720
1721#define BOF (0x4)
1722#define BOFP (0x5)
1723#define BOFM4 (0x6)
1724#define BOFP4 (0x7)
1725#define BOT (0xc)
1726#define BOTP (0xd)
1727#define BOTM4 (0xe)
1728#define BOTP4 (0xf)
1729
1730#define BODNZ (0x10)
1731#define BODNZP (0x11)
1732#define BODZ (0x12)
1733#define BODZP (0x13)
1734#define BODNZM4 (0x18)
1735#define BODNZP4 (0x19)
1736#define BODZM4 (0x1a)
1737#define BODZP4 (0x1b)
1738
1739#define BOU (0x14)
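/* Example: BODNZ (decrement CTR, branch if it is then nonzero)
   combined with major opcode 16 yields ``bdnz'', encoded below as
   BBO (16, BODNZ, 0, 0).  */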
1740
1741/* The BI condition bit encodings used in extended conditional branch
1742 mnemonics. */
1743#define CBLT (0)
1744#define CBGT (1)
1745#define CBEQ (2)
1746#define CBSO (3)
1747
1748/* The TO encodings used in extended trap mnemonics. */
1749#define TOLGT (0x1)
1750#define TOLLT (0x2)
1751#define TOEQ (0x4)
1752#define TOLGE (0x5)
1753#define TOLNL (0x5)
1754#define TOLLE (0x6)
1755#define TOLNG (0x6)
1756#define TOGT (0x8)
1757#define TOGE (0xc)
1758#define TONL (0xc)
1759#define TOLT (0x10)
1760#define TOLE (0x14)
1761#define TONG (0x14)
1762#define TONE (0x18)
1763#define TOU (0x1f)
1764
1765/* Smaller names for the flags so each entry in the opcodes table will
1766 fit on a single line. */
1767#undef PPC
1768#define PPC PPC_OPCODE_PPC
1769#define PPCCOM PPC_OPCODE_PPC | PPC_OPCODE_COMMON
1770#define NOPOWER4 PPC_OPCODE_NOPOWER4 | PPCCOM
1771#define POWER4 PPC_OPCODE_POWER4
1772#define PPC32 PPC_OPCODE_32 | PPC_OPCODE_PPC
1773#define PPC64 PPC_OPCODE_64 | PPC_OPCODE_PPC
1774#define PPC403 PPC_OPCODE_403
1775#define PPC405 PPC403
1776#define PPC440 PPC_OPCODE_440
1777#define PPC750 PPC
1778#define PPC860 PPC
1779#define PPCVEC PPC_OPCODE_ALTIVEC | PPC_OPCODE_PPC
1780#define POWER PPC_OPCODE_POWER
1781#define POWER2 PPC_OPCODE_POWER | PPC_OPCODE_POWER2
1782#define PPCPWR2 PPC_OPCODE_PPC | PPC_OPCODE_POWER | PPC_OPCODE_POWER2
1783#define POWER32 PPC_OPCODE_POWER | PPC_OPCODE_32
1784#define COM PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON
1785#define COM32 PPC_OPCODE_POWER | PPC_OPCODE_PPC | PPC_OPCODE_COMMON | PPC_OPCODE_32
1786#define M601 PPC_OPCODE_POWER | PPC_OPCODE_601
1787#define PWRCOM PPC_OPCODE_POWER | PPC_OPCODE_601 | PPC_OPCODE_COMMON
1788#define MFDEC1 PPC_OPCODE_POWER
1789#define MFDEC2 PPC_OPCODE_PPC | PPC_OPCODE_601 | PPC_OPCODE_BOOKE
1790#define BOOKE PPC_OPCODE_BOOKE
1791#define BOOKE64 PPC_OPCODE_BOOKE64
1792#define CLASSIC PPC_OPCODE_CLASSIC
1793#define PPCSPE PPC_OPCODE_SPE
1794#define PPCISEL PPC_OPCODE_ISEL
1795#define PPCEFS PPC_OPCODE_EFS
1796#define PPCBRLK PPC_OPCODE_BRLOCK
1797#define PPCPMR PPC_OPCODE_PMR
1798#define PPCCHLK PPC_OPCODE_CACHELCK
1799#define PPCCHLK64 PPC_OPCODE_CACHELCK | PPC_OPCODE_BOOKE64
1800#define PPCRFMCI PPC_OPCODE_RFMCI
1801
1802/* The opcode table.
1803
1804 The format of the opcode table is:
1805
1806 NAME OPCODE MASK FLAGS { OPERANDS }
1807
1808 NAME is the name of the instruction.
1809 OPCODE is the instruction opcode.
1810 MASK is the opcode mask; this is used to tell the disassembler
1811 which bits in the actual opcode must match OPCODE.
1812 FLAGS are flags indicating which processors support the instruction.
1813 OPERANDS is the list of operands.
1814
1815 The disassembler reads the table in order and prints the first
1816 instruction which matches, so this table is sorted to put more
1817 specific instructions before more general instructions. It is also
1818 sorted by major opcode. */
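
/* Roughly, the disassembler takes the first entry here for which

       (insn & entry->mask) == entry->opcode

   holds and whose FLAGS overlap the selected dialect; hence the
   ordering constraint described above.  */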
1819
1820const struct powerpc_opcode powerpc_opcodes[] = {
1821{ "attn", X(0,256), X_MASK, POWER4, { 0 } },
1822{ "tdlgti", OPTO(2,TOLGT), OPTO_MASK, PPC64, { RA, SI } },
1823{ "tdllti", OPTO(2,TOLLT), OPTO_MASK, PPC64, { RA, SI } },
1824{ "tdeqi", OPTO(2,TOEQ), OPTO_MASK, PPC64, { RA, SI } },
1825{ "tdlgei", OPTO(2,TOLGE), OPTO_MASK, PPC64, { RA, SI } },
1826{ "tdlnli", OPTO(2,TOLNL), OPTO_MASK, PPC64, { RA, SI } },
1827{ "tdllei", OPTO(2,TOLLE), OPTO_MASK, PPC64, { RA, SI } },
1828{ "tdlngi", OPTO(2,TOLNG), OPTO_MASK, PPC64, { RA, SI } },
1829{ "tdgti", OPTO(2,TOGT), OPTO_MASK, PPC64, { RA, SI } },
1830{ "tdgei", OPTO(2,TOGE), OPTO_MASK, PPC64, { RA, SI } },
1831{ "tdnli", OPTO(2,TONL), OPTO_MASK, PPC64, { RA, SI } },
1832{ "tdlti", OPTO(2,TOLT), OPTO_MASK, PPC64, { RA, SI } },
1833{ "tdlei", OPTO(2,TOLE), OPTO_MASK, PPC64, { RA, SI } },
1834{ "tdngi", OPTO(2,TONG), OPTO_MASK, PPC64, { RA, SI } },
1835{ "tdnei", OPTO(2,TONE), OPTO_MASK, PPC64, { RA, SI } },
1836{ "tdi", OP(2), OP_MASK, PPC64, { TO, RA, SI } },
1837
1838{ "twlgti", OPTO(3,TOLGT), OPTO_MASK, PPCCOM, { RA, SI } },
1839{ "tlgti", OPTO(3,TOLGT), OPTO_MASK, PWRCOM, { RA, SI } },
1840{ "twllti", OPTO(3,TOLLT), OPTO_MASK, PPCCOM, { RA, SI } },
1841{ "tllti", OPTO(3,TOLLT), OPTO_MASK, PWRCOM, { RA, SI } },
1842{ "tweqi", OPTO(3,TOEQ), OPTO_MASK, PPCCOM, { RA, SI } },
1843{ "teqi", OPTO(3,TOEQ), OPTO_MASK, PWRCOM, { RA, SI } },
1844{ "twlgei", OPTO(3,TOLGE), OPTO_MASK, PPCCOM, { RA, SI } },
1845{ "tlgei", OPTO(3,TOLGE), OPTO_MASK, PWRCOM, { RA, SI } },
1846{ "twlnli", OPTO(3,TOLNL), OPTO_MASK, PPCCOM, { RA, SI } },
1847{ "tlnli", OPTO(3,TOLNL), OPTO_MASK, PWRCOM, { RA, SI } },
1848{ "twllei", OPTO(3,TOLLE), OPTO_MASK, PPCCOM, { RA, SI } },
1849{ "tllei", OPTO(3,TOLLE), OPTO_MASK, PWRCOM, { RA, SI } },
1850{ "twlngi", OPTO(3,TOLNG), OPTO_MASK, PPCCOM, { RA, SI } },
1851{ "tlngi", OPTO(3,TOLNG), OPTO_MASK, PWRCOM, { RA, SI } },
1852{ "twgti", OPTO(3,TOGT), OPTO_MASK, PPCCOM, { RA, SI } },
1853{ "tgti", OPTO(3,TOGT), OPTO_MASK, PWRCOM, { RA, SI } },
1854{ "twgei", OPTO(3,TOGE), OPTO_MASK, PPCCOM, { RA, SI } },
1855{ "tgei", OPTO(3,TOGE), OPTO_MASK, PWRCOM, { RA, SI } },
1856{ "twnli", OPTO(3,TONL), OPTO_MASK, PPCCOM, { RA, SI } },
1857{ "tnli", OPTO(3,TONL), OPTO_MASK, PWRCOM, { RA, SI } },
1858{ "twlti", OPTO(3,TOLT), OPTO_MASK, PPCCOM, { RA, SI } },
1859{ "tlti", OPTO(3,TOLT), OPTO_MASK, PWRCOM, { RA, SI } },
1860{ "twlei", OPTO(3,TOLE), OPTO_MASK, PPCCOM, { RA, SI } },
1861{ "tlei", OPTO(3,TOLE), OPTO_MASK, PWRCOM, { RA, SI } },
1862{ "twngi", OPTO(3,TONG), OPTO_MASK, PPCCOM, { RA, SI } },
1863{ "tngi", OPTO(3,TONG), OPTO_MASK, PWRCOM, { RA, SI } },
1864{ "twnei", OPTO(3,TONE), OPTO_MASK, PPCCOM, { RA, SI } },
1865{ "tnei", OPTO(3,TONE), OPTO_MASK, PWRCOM, { RA, SI } },
1866{ "twi", OP(3), OP_MASK, PPCCOM, { TO, RA, SI } },
1867{ "ti", OP(3), OP_MASK, PWRCOM, { TO, RA, SI } },
1868
1869{ "macchw", XO(4,172,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1870{ "macchw.", XO(4,172,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1871{ "macchwo", XO(4,172,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1872{ "macchwo.", XO(4,172,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1873{ "macchws", XO(4,236,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1874{ "macchws.", XO(4,236,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1875{ "macchwso", XO(4,236,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1876{ "macchwso.", XO(4,236,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1877{ "macchwsu", XO(4,204,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1878{ "macchwsu.", XO(4,204,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1879{ "macchwsuo", XO(4,204,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1880{ "macchwsuo.", XO(4,204,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1881{ "macchwu", XO(4,140,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1882{ "macchwu.", XO(4,140,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1883{ "macchwuo", XO(4,140,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1884{ "macchwuo.", XO(4,140,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1885{ "machhw", XO(4,44,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1886{ "machhw.", XO(4,44,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1887{ "machhwo", XO(4,44,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1888{ "machhwo.", XO(4,44,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1889{ "machhws", XO(4,108,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1890{ "machhws.", XO(4,108,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1891{ "machhwso", XO(4,108,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1892{ "machhwso.", XO(4,108,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1893{ "machhwsu", XO(4,76,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1894{ "machhwsu.", XO(4,76,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1895{ "machhwsuo", XO(4,76,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1896{ "machhwsuo.", XO(4,76,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1897{ "machhwu", XO(4,12,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1898{ "machhwu.", XO(4,12,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1899{ "machhwuo", XO(4,12,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1900{ "machhwuo.", XO(4,12,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1901{ "maclhw", XO(4,428,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1902{ "maclhw.", XO(4,428,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1903{ "maclhwo", XO(4,428,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1904{ "maclhwo.", XO(4,428,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1905{ "maclhws", XO(4,492,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1906{ "maclhws.", XO(4,492,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1907{ "maclhwso", XO(4,492,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1908{ "maclhwso.", XO(4,492,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1909{ "maclhwsu", XO(4,460,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1910{ "maclhwsu.", XO(4,460,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1911{ "maclhwsuo", XO(4,460,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1912{ "maclhwsuo.", XO(4,460,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1913{ "maclhwu", XO(4,396,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1914{ "maclhwu.", XO(4,396,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1915{ "maclhwuo", XO(4,396,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1916{ "maclhwuo.", XO(4,396,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1917{ "mulchw", XRC(4,168,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1918{ "mulchw.", XRC(4,168,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1919{ "mulchwu", XRC(4,136,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1920{ "mulchwu.", XRC(4,136,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1921{ "mulhhw", XRC(4,40,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1922{ "mulhhw.", XRC(4,40,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1923{ "mulhhwu", XRC(4,8,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1924{ "mulhhwu.", XRC(4,8,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1925{ "mullhw", XRC(4,424,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1926{ "mullhw.", XRC(4,424,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1927{ "mullhwu", XRC(4,392,0), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1928{ "mullhwu.", XRC(4,392,1), X_MASK, PPC405|PPC440, { RT, RA, RB } },
1929{ "nmacchw", XO(4,174,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1930{ "nmacchw.", XO(4,174,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1931{ "nmacchwo", XO(4,174,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1932{ "nmacchwo.", XO(4,174,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1933{ "nmacchws", XO(4,238,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1934{ "nmacchws.", XO(4,238,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1935{ "nmacchwso", XO(4,238,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1936{ "nmacchwso.", XO(4,238,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1937{ "nmachhw", XO(4,46,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1938{ "nmachhw.", XO(4,46,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1939{ "nmachhwo", XO(4,46,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1940{ "nmachhwo.", XO(4,46,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1941{ "nmachhws", XO(4,110,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1942{ "nmachhws.", XO(4,110,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1943{ "nmachhwso", XO(4,110,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1944{ "nmachhwso.", XO(4,110,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1945{ "nmaclhw", XO(4,430,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1946{ "nmaclhw.", XO(4,430,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1947{ "nmaclhwo", XO(4,430,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1948{ "nmaclhwo.", XO(4,430,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1949{ "nmaclhws", XO(4,494,0,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1950{ "nmaclhws.", XO(4,494,0,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1951{ "nmaclhwso", XO(4,494,1,0), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1952{ "nmaclhwso.", XO(4,494,1,1), XO_MASK, PPC405|PPC440, { RT, RA, RB } },
1953{ "mfvscr", VX(4, 1540), VX_MASK, PPCVEC, { VD } },
1954{ "mtvscr", VX(4, 1604), VX_MASK, PPCVEC, { VB } },
1955{ "vaddcuw", VX(4, 384), VX_MASK, PPCVEC, { VD, VA, VB } },
1956{ "vaddfp", VX(4, 10), VX_MASK, PPCVEC, { VD, VA, VB } },
1957{ "vaddsbs", VX(4, 768), VX_MASK, PPCVEC, { VD, VA, VB } },
1958{ "vaddshs", VX(4, 832), VX_MASK, PPCVEC, { VD, VA, VB } },
1959{ "vaddsws", VX(4, 896), VX_MASK, PPCVEC, { VD, VA, VB } },
1960{ "vaddubm", VX(4, 0), VX_MASK, PPCVEC, { VD, VA, VB } },
1961{ "vaddubs", VX(4, 512), VX_MASK, PPCVEC, { VD, VA, VB } },
1962{ "vadduhm", VX(4, 64), VX_MASK, PPCVEC, { VD, VA, VB } },
1963{ "vadduhs", VX(4, 576), VX_MASK, PPCVEC, { VD, VA, VB } },
1964{ "vadduwm", VX(4, 128), VX_MASK, PPCVEC, { VD, VA, VB } },
1965{ "vadduws", VX(4, 640), VX_MASK, PPCVEC, { VD, VA, VB } },
1966{ "vand", VX(4, 1028), VX_MASK, PPCVEC, { VD, VA, VB } },
1967{ "vandc", VX(4, 1092), VX_MASK, PPCVEC, { VD, VA, VB } },
1968{ "vavgsb", VX(4, 1282), VX_MASK, PPCVEC, { VD, VA, VB } },
1969{ "vavgsh", VX(4, 1346), VX_MASK, PPCVEC, { VD, VA, VB } },
1970{ "vavgsw", VX(4, 1410), VX_MASK, PPCVEC, { VD, VA, VB } },
1971{ "vavgub", VX(4, 1026), VX_MASK, PPCVEC, { VD, VA, VB } },
1972{ "vavguh", VX(4, 1090), VX_MASK, PPCVEC, { VD, VA, VB } },
1973{ "vavguw", VX(4, 1154), VX_MASK, PPCVEC, { VD, VA, VB } },
1974{ "vcfsx", VX(4, 842), VX_MASK, PPCVEC, { VD, VB, UIMM } },
1975{ "vcfux", VX(4, 778), VX_MASK, PPCVEC, { VD, VB, UIMM } },
1976{ "vcmpbfp", VXR(4, 966, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1977{ "vcmpbfp.", VXR(4, 966, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1978{ "vcmpeqfp", VXR(4, 198, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1979{ "vcmpeqfp.", VXR(4, 198, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1980{ "vcmpequb", VXR(4, 6, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1981{ "vcmpequb.", VXR(4, 6, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1982{ "vcmpequh", VXR(4, 70, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1983{ "vcmpequh.", VXR(4, 70, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1984{ "vcmpequw", VXR(4, 134, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1985{ "vcmpequw.", VXR(4, 134, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1986{ "vcmpgefp", VXR(4, 454, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1987{ "vcmpgefp.", VXR(4, 454, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1988{ "vcmpgtfp", VXR(4, 710, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1989{ "vcmpgtfp.", VXR(4, 710, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1990{ "vcmpgtsb", VXR(4, 774, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1991{ "vcmpgtsb.", VXR(4, 774, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1992{ "vcmpgtsh", VXR(4, 838, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1993{ "vcmpgtsh.", VXR(4, 838, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1994{ "vcmpgtsw", VXR(4, 902, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1995{ "vcmpgtsw.", VXR(4, 902, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1996{ "vcmpgtub", VXR(4, 518, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1997{ "vcmpgtub.", VXR(4, 518, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
1998{ "vcmpgtuh", VXR(4, 582, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
1999{ "vcmpgtuh.", VXR(4, 582, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
2000{ "vcmpgtuw", VXR(4, 646, 0), VXR_MASK, PPCVEC, { VD, VA, VB } },
2001{ "vcmpgtuw.", VXR(4, 646, 1), VXR_MASK, PPCVEC, { VD, VA, VB } },
2002{ "vctsxs", VX(4, 970), VX_MASK, PPCVEC, { VD, VB, UIMM } },
2003{ "vctuxs", VX(4, 906), VX_MASK, PPCVEC, { VD, VB, UIMM } },
2004{ "vexptefp", VX(4, 394), VX_MASK, PPCVEC, { VD, VB } },
2005{ "vlogefp", VX(4, 458), VX_MASK, PPCVEC, { VD, VB } },
2006{ "vmaddfp", VXA(4, 46), VXA_MASK, PPCVEC, { VD, VA, VC, VB } },
2007{ "vmaxfp", VX(4, 1034), VX_MASK, PPCVEC, { VD, VA, VB } },
2008{ "vmaxsb", VX(4, 258), VX_MASK, PPCVEC, { VD, VA, VB } },
2009{ "vmaxsh", VX(4, 322), VX_MASK, PPCVEC, { VD, VA, VB } },
2010{ "vmaxsw", VX(4, 386), VX_MASK, PPCVEC, { VD, VA, VB } },
2011{ "vmaxub", VX(4, 2), VX_MASK, PPCVEC, { VD, VA, VB } },
2012{ "vmaxuh", VX(4, 66), VX_MASK, PPCVEC, { VD, VA, VB } },
2013{ "vmaxuw", VX(4, 130), VX_MASK, PPCVEC, { VD, VA, VB } },
2014{ "vmhaddshs", VXA(4, 32), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2015{ "vmhraddshs", VXA(4, 33), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2016{ "vminfp", VX(4, 1098), VX_MASK, PPCVEC, { VD, VA, VB } },
2017{ "vminsb", VX(4, 770), VX_MASK, PPCVEC, { VD, VA, VB } },
2018{ "vminsh", VX(4, 834), VX_MASK, PPCVEC, { VD, VA, VB } },
2019{ "vminsw", VX(4, 898), VX_MASK, PPCVEC, { VD, VA, VB } },
2020{ "vminub", VX(4, 514), VX_MASK, PPCVEC, { VD, VA, VB } },
2021{ "vminuh", VX(4, 578), VX_MASK, PPCVEC, { VD, VA, VB } },
2022{ "vminuw", VX(4, 642), VX_MASK, PPCVEC, { VD, VA, VB } },
2023{ "vmladduhm", VXA(4, 34), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2024{ "vmrghb", VX(4, 12), VX_MASK, PPCVEC, { VD, VA, VB } },
2025{ "vmrghh", VX(4, 76), VX_MASK, PPCVEC, { VD, VA, VB } },
2026{ "vmrghw", VX(4, 140), VX_MASK, PPCVEC, { VD, VA, VB } },
2027{ "vmrglb", VX(4, 268), VX_MASK, PPCVEC, { VD, VA, VB } },
2028{ "vmrglh", VX(4, 332), VX_MASK, PPCVEC, { VD, VA, VB } },
2029{ "vmrglw", VX(4, 396), VX_MASK, PPCVEC, { VD, VA, VB } },
2030{ "vmsummbm", VXA(4, 37), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2031{ "vmsumshm", VXA(4, 40), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2032{ "vmsumshs", VXA(4, 41), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2033{ "vmsumubm", VXA(4, 36), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2034{ "vmsumuhm", VXA(4, 38), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2035{ "vmsumuhs", VXA(4, 39), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2036{ "vmulesb", VX(4, 776), VX_MASK, PPCVEC, { VD, VA, VB } },
2037{ "vmulesh", VX(4, 840), VX_MASK, PPCVEC, { VD, VA, VB } },
2038{ "vmuleub", VX(4, 520), VX_MASK, PPCVEC, { VD, VA, VB } },
2039{ "vmuleuh", VX(4, 584), VX_MASK, PPCVEC, { VD, VA, VB } },
2040{ "vmulosb", VX(4, 264), VX_MASK, PPCVEC, { VD, VA, VB } },
2041{ "vmulosh", VX(4, 328), VX_MASK, PPCVEC, { VD, VA, VB } },
2042{ "vmuloub", VX(4, 8), VX_MASK, PPCVEC, { VD, VA, VB } },
2043{ "vmulouh", VX(4, 72), VX_MASK, PPCVEC, { VD, VA, VB } },
2044{ "vnmsubfp", VXA(4, 47), VXA_MASK, PPCVEC, { VD, VA, VC, VB } },
2045{ "vnor", VX(4, 1284), VX_MASK, PPCVEC, { VD, VA, VB } },
2046{ "vor", VX(4, 1156), VX_MASK, PPCVEC, { VD, VA, VB } },
2047{ "vperm", VXA(4, 43), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2048{ "vpkpx", VX(4, 782), VX_MASK, PPCVEC, { VD, VA, VB } },
2049{ "vpkshss", VX(4, 398), VX_MASK, PPCVEC, { VD, VA, VB } },
2050{ "vpkshus", VX(4, 270), VX_MASK, PPCVEC, { VD, VA, VB } },
2051{ "vpkswss", VX(4, 462), VX_MASK, PPCVEC, { VD, VA, VB } },
2052{ "vpkswus", VX(4, 334), VX_MASK, PPCVEC, { VD, VA, VB } },
2053{ "vpkuhum", VX(4, 14), VX_MASK, PPCVEC, { VD, VA, VB } },
2054{ "vpkuhus", VX(4, 142), VX_MASK, PPCVEC, { VD, VA, VB } },
2055{ "vpkuwum", VX(4, 78), VX_MASK, PPCVEC, { VD, VA, VB } },
2056{ "vpkuwus", VX(4, 206), VX_MASK, PPCVEC, { VD, VA, VB } },
2057{ "vrefp", VX(4, 266), VX_MASK, PPCVEC, { VD, VB } },
2058{ "vrfim", VX(4, 714), VX_MASK, PPCVEC, { VD, VB } },
2059{ "vrfin", VX(4, 522), VX_MASK, PPCVEC, { VD, VB } },
2060{ "vrfip", VX(4, 650), VX_MASK, PPCVEC, { VD, VB } },
2061{ "vrfiz", VX(4, 586), VX_MASK, PPCVEC, { VD, VB } },
2062{ "vrlb", VX(4, 4), VX_MASK, PPCVEC, { VD, VA, VB } },
2063{ "vrlh", VX(4, 68), VX_MASK, PPCVEC, { VD, VA, VB } },
2064{ "vrlw", VX(4, 132), VX_MASK, PPCVEC, { VD, VA, VB } },
2065{ "vrsqrtefp", VX(4, 330), VX_MASK, PPCVEC, { VD, VB } },
2066{ "vsel", VXA(4, 42), VXA_MASK, PPCVEC, { VD, VA, VB, VC } },
2067{ "vsl", VX(4, 452), VX_MASK, PPCVEC, { VD, VA, VB } },
2068{ "vslb", VX(4, 260), VX_MASK, PPCVEC, { VD, VA, VB } },
2069{ "vsldoi", VXA(4, 44), VXA_MASK, PPCVEC, { VD, VA, VB, SHB } },
2070{ "vslh", VX(4, 324), VX_MASK, PPCVEC, { VD, VA, VB } },
2071{ "vslo", VX(4, 1036), VX_MASK, PPCVEC, { VD, VA, VB } },
2072{ "vslw", VX(4, 388), VX_MASK, PPCVEC, { VD, VA, VB } },
2073{ "vspltb", VX(4, 524), VX_MASK, PPCVEC, { VD, VB, UIMM } },
2074{ "vsplth", VX(4, 588), VX_MASK, PPCVEC, { VD, VB, UIMM } },
2075{ "vspltisb", VX(4, 780), VX_MASK, PPCVEC, { VD, SIMM } },
2076{ "vspltish", VX(4, 844), VX_MASK, PPCVEC, { VD, SIMM } },
2077{ "vspltisw", VX(4, 908), VX_MASK, PPCVEC, { VD, SIMM } },
2078{ "vspltw", VX(4, 652), VX_MASK, PPCVEC, { VD, VB, UIMM } },
2079{ "vsr", VX(4, 708), VX_MASK, PPCVEC, { VD, VA, VB } },
2080{ "vsrab", VX(4, 772), VX_MASK, PPCVEC, { VD, VA, VB } },
2081{ "vsrah", VX(4, 836), VX_MASK, PPCVEC, { VD, VA, VB } },
2082{ "vsraw", VX(4, 900), VX_MASK, PPCVEC, { VD, VA, VB } },
2083{ "vsrb", VX(4, 516), VX_MASK, PPCVEC, { VD, VA, VB } },
2084{ "vsrh", VX(4, 580), VX_MASK, PPCVEC, { VD, VA, VB } },
2085{ "vsro", VX(4, 1100), VX_MASK, PPCVEC, { VD, VA, VB } },
2086{ "vsrw", VX(4, 644), VX_MASK, PPCVEC, { VD, VA, VB } },
2087{ "vsubcuw", VX(4, 1408), VX_MASK, PPCVEC, { VD, VA, VB } },
2088{ "vsubfp", VX(4, 74), VX_MASK, PPCVEC, { VD, VA, VB } },
2089{ "vsubsbs", VX(4, 1792), VX_MASK, PPCVEC, { VD, VA, VB } },
2090{ "vsubshs", VX(4, 1856), VX_MASK, PPCVEC, { VD, VA, VB } },
2091{ "vsubsws", VX(4, 1920), VX_MASK, PPCVEC, { VD, VA, VB } },
2092{ "vsububm", VX(4, 1024), VX_MASK, PPCVEC, { VD, VA, VB } },
2093{ "vsububs", VX(4, 1536), VX_MASK, PPCVEC, { VD, VA, VB } },
2094{ "vsubuhm", VX(4, 1088), VX_MASK, PPCVEC, { VD, VA, VB } },
2095{ "vsubuhs", VX(4, 1600), VX_MASK, PPCVEC, { VD, VA, VB } },
2096{ "vsubuwm", VX(4, 1152), VX_MASK, PPCVEC, { VD, VA, VB } },
2097{ "vsubuws", VX(4, 1664), VX_MASK, PPCVEC, { VD, VA, VB } },
2098{ "vsumsws", VX(4, 1928), VX_MASK, PPCVEC, { VD, VA, VB } },
2099{ "vsum2sws", VX(4, 1672), VX_MASK, PPCVEC, { VD, VA, VB } },
2100{ "vsum4sbs", VX(4, 1800), VX_MASK, PPCVEC, { VD, VA, VB } },
2101{ "vsum4shs", VX(4, 1608), VX_MASK, PPCVEC, { VD, VA, VB } },
2102{ "vsum4ubs", VX(4, 1544), VX_MASK, PPCVEC, { VD, VA, VB } },
2103{ "vupkhpx", VX(4, 846), VX_MASK, PPCVEC, { VD, VB } },
2104{ "vupkhsb", VX(4, 526), VX_MASK, PPCVEC, { VD, VB } },
2105{ "vupkhsh", VX(4, 590), VX_MASK, PPCVEC, { VD, VB } },
2106{ "vupklpx", VX(4, 974), VX_MASK, PPCVEC, { VD, VB } },
2107{ "vupklsb", VX(4, 654), VX_MASK, PPCVEC, { VD, VB } },
2108{ "vupklsh", VX(4, 718), VX_MASK, PPCVEC, { VD, VB } },
2109{ "vxor", VX(4, 1220), VX_MASK, PPCVEC, { VD, VA, VB } },
2110
2111{ "evaddw", VX(4, 512), VX_MASK, PPCSPE, { RS, RA, RB } },
2112{ "evaddiw", VX(4, 514), VX_MASK, PPCSPE, { RS, RB, UIMM } },
2113{ "evsubfw", VX(4, 516), VX_MASK, PPCSPE, { RS, RA, RB } },
2114{ "evsubw", VX(4, 516), VX_MASK, PPCSPE, { RS, RB, RA } },
2115{ "evsubifw", VX(4, 518), VX_MASK, PPCSPE, { RS, UIMM, RB } },
2116{ "evsubiw", VX(4, 518), VX_MASK, PPCSPE, { RS, RB, UIMM } },
2117{ "evabs", VX(4, 520), VX_MASK, PPCSPE, { RS, RA } },
2118{ "evneg", VX(4, 521), VX_MASK, PPCSPE, { RS, RA } },
2119{ "evextsb", VX(4, 522), VX_MASK, PPCSPE, { RS, RA } },
2120{ "evextsh", VX(4, 523), VX_MASK, PPCSPE, { RS, RA } },
2121{ "evrndw", VX(4, 524), VX_MASK, PPCSPE, { RS, RA } },
2122{ "evcntlzw", VX(4, 525), VX_MASK, PPCSPE, { RS, RA } },
2123{ "evcntlsw", VX(4, 526), VX_MASK, PPCSPE, { RS, RA } },
2124
2125{ "brinc", VX(4, 527), VX_MASK, PPCSPE, { RS, RA, RB } },
2126
2127{ "evand", VX(4, 529), VX_MASK, PPCSPE, { RS, RA, RB } },
2128{ "evandc", VX(4, 530), VX_MASK, PPCSPE, { RS, RA, RB } },
2129{ "evmr", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, BBA } },
2130{ "evor", VX(4, 535), VX_MASK, PPCSPE, { RS, RA, RB } },
2131{ "evorc", VX(4, 539), VX_MASK, PPCSPE, { RS, RA, RB } },
2132{ "evxor", VX(4, 534), VX_MASK, PPCSPE, { RS, RA, RB } },
2133{ "eveqv", VX(4, 537), VX_MASK, PPCSPE, { RS, RA, RB } },
2134{ "evnand", VX(4, 542), VX_MASK, PPCSPE, { RS, RA, RB } },
2135{ "evnot", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, BBA } },
2136{ "evnor", VX(4, 536), VX_MASK, PPCSPE, { RS, RA, RB } },
2137
2138{ "evrlw", VX(4, 552), VX_MASK, PPCSPE, { RS, RA, RB } },
2139{ "evrlwi", VX(4, 554), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
2140{ "evslw", VX(4, 548), VX_MASK, PPCSPE, { RS, RA, RB } },
2141{ "evslwi", VX(4, 550), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
2142{ "evsrws", VX(4, 545), VX_MASK, PPCSPE, { RS, RA, RB } },
2143{ "evsrwu", VX(4, 544), VX_MASK, PPCSPE, { RS, RA, RB } },
2144{ "evsrwis", VX(4, 547), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
2145{ "evsrwiu", VX(4, 546), VX_MASK, PPCSPE, { RS, RA, EVUIMM } },
2146{ "evsplati", VX(4, 553), VX_MASK, PPCSPE, { RS, SIMM } },
2147{ "evsplatfi", VX(4, 555), VX_MASK, PPCSPE, { RS, SIMM } },
2148{ "evmergehi", VX(4, 556), VX_MASK, PPCSPE, { RS, RA, RB } },
2149{ "evmergelo", VX(4, 557), VX_MASK, PPCSPE, { RS, RA, RB } },
2150{ "evmergehilo",VX(4,558), VX_MASK, PPCSPE, { RS, RA, RB } },
2151{ "evmergelohi",VX(4,559), VX_MASK, PPCSPE, { RS, RA, RB } },
2152
2153{ "evcmpgts", VX(4, 561), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2154{ "evcmpgtu", VX(4, 560), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2155{ "evcmplts", VX(4, 563), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2156{ "evcmpltu", VX(4, 562), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2157{ "evcmpeq", VX(4, 564), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2158{ "evsel", EVSEL(4,79),EVSEL_MASK, PPCSPE, { RS, RA, RB, CRFS } },
2159
2160{ "evldd", VX(4, 769), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2161{ "evlddx", VX(4, 768), VX_MASK, PPCSPE, { RS, RA, RB } },
2162{ "evldw", VX(4, 771), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2163{ "evldwx", VX(4, 770), VX_MASK, PPCSPE, { RS, RA, RB } },
2164{ "evldh", VX(4, 773), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2165{ "evldhx", VX(4, 772), VX_MASK, PPCSPE, { RS, RA, RB } },
2166{ "evlwhe", VX(4, 785), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2167{ "evlwhex", VX(4, 784), VX_MASK, PPCSPE, { RS, RA, RB } },
2168{ "evlwhou", VX(4, 789), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2169{ "evlwhoux", VX(4, 788), VX_MASK, PPCSPE, { RS, RA, RB } },
2170{ "evlwhos", VX(4, 791), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2171{ "evlwhosx", VX(4, 790), VX_MASK, PPCSPE, { RS, RA, RB } },
2172{ "evlwwsplat",VX(4, 793), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2173{ "evlwwsplatx",VX(4, 792), VX_MASK, PPCSPE, { RS, RA, RB } },
2174{ "evlwhsplat",VX(4, 797), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2175{ "evlwhsplatx",VX(4, 796), VX_MASK, PPCSPE, { RS, RA, RB } },
2176{ "evlhhesplat",VX(4, 777), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
2177{ "evlhhesplatx",VX(4, 776), VX_MASK, PPCSPE, { RS, RA, RB } },
2178{ "evlhhousplat",VX(4, 781), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
2179{ "evlhhousplatx",VX(4, 780), VX_MASK, PPCSPE, { RS, RA, RB } },
2180{ "evlhhossplat",VX(4, 783), VX_MASK, PPCSPE, { RS, EVUIMM_2, RA } },
2181{ "evlhhossplatx",VX(4, 782), VX_MASK, PPCSPE, { RS, RA, RB } },
2182
2183{ "evstdd", VX(4, 801), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2184{ "evstddx", VX(4, 800), VX_MASK, PPCSPE, { RS, RA, RB } },
2185{ "evstdw", VX(4, 803), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2186{ "evstdwx", VX(4, 802), VX_MASK, PPCSPE, { RS, RA, RB } },
2187{ "evstdh", VX(4, 805), VX_MASK, PPCSPE, { RS, EVUIMM_8, RA } },
2188{ "evstdhx", VX(4, 804), VX_MASK, PPCSPE, { RS, RA, RB } },
2189{ "evstwwe", VX(4, 825), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2190{ "evstwwex", VX(4, 824), VX_MASK, PPCSPE, { RS, RA, RB } },
2191{ "evstwwo", VX(4, 829), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2192{ "evstwwox", VX(4, 828), VX_MASK, PPCSPE, { RS, RA, RB } },
2193{ "evstwhe", VX(4, 817), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2194{ "evstwhex", VX(4, 816), VX_MASK, PPCSPE, { RS, RA, RB } },
2195{ "evstwho", VX(4, 821), VX_MASK, PPCSPE, { RS, EVUIMM_4, RA } },
2196{ "evstwhox", VX(4, 820), VX_MASK, PPCSPE, { RS, RA, RB } },
2197
2198{ "evfsabs", VX(4, 644), VX_MASK, PPCSPE, { RS, RA } },
2199{ "evfsnabs", VX(4, 645), VX_MASK, PPCSPE, { RS, RA } },
2200{ "evfsneg", VX(4, 646), VX_MASK, PPCSPE, { RS, RA } },
2201{ "evfsadd", VX(4, 640), VX_MASK, PPCSPE, { RS, RA, RB } },
2202{ "evfssub", VX(4, 641), VX_MASK, PPCSPE, { RS, RA, RB } },
2203{ "evfsmul", VX(4, 648), VX_MASK, PPCSPE, { RS, RA, RB } },
2204{ "evfsdiv", VX(4, 649), VX_MASK, PPCSPE, { RS, RA, RB } },
2205{ "evfscmpgt", VX(4, 652), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2206{ "evfscmplt", VX(4, 653), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2207{ "evfscmpeq", VX(4, 654), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2208{ "evfststgt", VX(4, 668), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2209{ "evfststlt", VX(4, 669), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2210{ "evfststeq", VX(4, 670), VX_MASK, PPCSPE, { CRFD, RA, RB } },
2211{ "evfscfui", VX(4, 656), VX_MASK, PPCSPE, { RS, RB } },
2212{ "evfsctuiz", VX(4, 664), VX_MASK, PPCSPE, { RS, RB } },
2213{ "evfscfsi", VX(4, 657), VX_MASK, PPCSPE, { RS, RB } },
2214{ "evfscfuf", VX(4, 658), VX_MASK, PPCSPE, { RS, RB } },
2215{ "evfscfsf", VX(4, 659), VX_MASK, PPCSPE, { RS, RB } },
2216{ "evfsctui", VX(4, 660), VX_MASK, PPCSPE, { RS, RB } },
2217{ "evfsctsi", VX(4, 661), VX_MASK, PPCSPE, { RS, RB } },
2218{ "evfsctsiz", VX(4, 666), VX_MASK, PPCSPE, { RS, RB } },
2219{ "evfsctuf", VX(4, 662), VX_MASK, PPCSPE, { RS, RB } },
2220{ "evfsctsf", VX(4, 663), VX_MASK, PPCSPE, { RS, RB } },
2221
2222{ "efsabs", VX(4, 708), VX_MASK, PPCEFS, { RS, RA } },
2223{ "efsnabs", VX(4, 709), VX_MASK, PPCEFS, { RS, RA } },
2224{ "efsneg", VX(4, 710), VX_MASK, PPCEFS, { RS, RA } },
2225{ "efsadd", VX(4, 704), VX_MASK, PPCEFS, { RS, RA, RB } },
2226{ "efssub", VX(4, 705), VX_MASK, PPCEFS, { RS, RA, RB } },
2227{ "efsmul", VX(4, 712), VX_MASK, PPCEFS, { RS, RA, RB } },
2228{ "efsdiv", VX(4, 713), VX_MASK, PPCEFS, { RS, RA, RB } },
2229{ "efscmpgt", VX(4, 716), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2230{ "efscmplt", VX(4, 717), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2231{ "efscmpeq", VX(4, 718), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2232{ "efststgt", VX(4, 732), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2233{ "efststlt", VX(4, 733), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2234{ "efststeq", VX(4, 734), VX_MASK, PPCEFS, { CRFD, RA, RB } },
2235{ "efscfui", VX(4, 720), VX_MASK, PPCEFS, { RS, RB } },
2236{ "efsctuiz", VX(4, 728), VX_MASK, PPCEFS, { RS, RB } },
2237{ "efscfsi", VX(4, 721), VX_MASK, PPCEFS, { RS, RB } },
2238{ "efscfuf", VX(4, 722), VX_MASK, PPCEFS, { RS, RB } },
2239{ "efscfsf", VX(4, 723), VX_MASK, PPCEFS, { RS, RB } },
2240{ "efsctui", VX(4, 724), VX_MASK, PPCEFS, { RS, RB } },
2241{ "efsctsi", VX(4, 725), VX_MASK, PPCEFS, { RS, RB } },
2242{ "efsctsiz", VX(4, 730), VX_MASK, PPCEFS, { RS, RB } },
2243{ "efsctuf", VX(4, 726), VX_MASK, PPCEFS, { RS, RB } },
2244{ "efsctsf", VX(4, 727), VX_MASK, PPCEFS, { RS, RB } },
2245
2246{ "evmhossf", VX(4, 1031), VX_MASK, PPCSPE, { RS, RA, RB } },
2247{ "evmhossfa", VX(4, 1063), VX_MASK, PPCSPE, { RS, RA, RB } },
2248{ "evmhosmf", VX(4, 1039), VX_MASK, PPCSPE, { RS, RA, RB } },
2249{ "evmhosmfa", VX(4, 1071), VX_MASK, PPCSPE, { RS, RA, RB } },
2250{ "evmhosmi", VX(4, 1037), VX_MASK, PPCSPE, { RS, RA, RB } },
2251{ "evmhosmia", VX(4, 1069), VX_MASK, PPCSPE, { RS, RA, RB } },
2252{ "evmhoumi", VX(4, 1036), VX_MASK, PPCSPE, { RS, RA, RB } },
2253{ "evmhoumia", VX(4, 1068), VX_MASK, PPCSPE, { RS, RA, RB } },
2254{ "evmhessf", VX(4, 1027), VX_MASK, PPCSPE, { RS, RA, RB } },
2255{ "evmhessfa", VX(4, 1059), VX_MASK, PPCSPE, { RS, RA, RB } },
2256{ "evmhesmf", VX(4, 1035), VX_MASK, PPCSPE, { RS, RA, RB } },
2257{ "evmhesmfa", VX(4, 1067), VX_MASK, PPCSPE, { RS, RA, RB } },
2258{ "evmhesmi", VX(4, 1033), VX_MASK, PPCSPE, { RS, RA, RB } },
2259{ "evmhesmia", VX(4, 1065), VX_MASK, PPCSPE, { RS, RA, RB } },
2260{ "evmheumi", VX(4, 1032), VX_MASK, PPCSPE, { RS, RA, RB } },
2261{ "evmheumia", VX(4, 1064), VX_MASK, PPCSPE, { RS, RA, RB } },
2262
2263{ "evmhossfaaw",VX(4, 1287), VX_MASK, PPCSPE, { RS, RA, RB } },
2264{ "evmhossiaaw",VX(4, 1285), VX_MASK, PPCSPE, { RS, RA, RB } },
2265{ "evmhosmfaaw",VX(4, 1295), VX_MASK, PPCSPE, { RS, RA, RB } },
2266{ "evmhosmiaaw",VX(4, 1293), VX_MASK, PPCSPE, { RS, RA, RB } },
2267{ "evmhousiaaw",VX(4, 1284), VX_MASK, PPCSPE, { RS, RA, RB } },
2268{ "evmhoumiaaw",VX(4, 1292), VX_MASK, PPCSPE, { RS, RA, RB } },
2269{ "evmhessfaaw",VX(4, 1283), VX_MASK, PPCSPE, { RS, RA, RB } },
2270{ "evmhessiaaw",VX(4, 1281), VX_MASK, PPCSPE, { RS, RA, RB } },
2271{ "evmhesmfaaw",VX(4, 1291), VX_MASK, PPCSPE, { RS, RA, RB } },
2272{ "evmhesmiaaw",VX(4, 1289), VX_MASK, PPCSPE, { RS, RA, RB } },
2273{ "evmheusiaaw",VX(4, 1280), VX_MASK, PPCSPE, { RS, RA, RB } },
2274{ "evmheumiaaw",VX(4, 1288), VX_MASK, PPCSPE, { RS, RA, RB } },
2275
2276{ "evmhossfanw",VX(4, 1415), VX_MASK, PPCSPE, { RS, RA, RB } },
2277{ "evmhossianw",VX(4, 1413), VX_MASK, PPCSPE, { RS, RA, RB } },
2278{ "evmhosmfanw",VX(4, 1423), VX_MASK, PPCSPE, { RS, RA, RB } },
2279{ "evmhosmianw",VX(4, 1421), VX_MASK, PPCSPE, { RS, RA, RB } },
2280{ "evmhousianw",VX(4, 1412), VX_MASK, PPCSPE, { RS, RA, RB } },
2281{ "evmhoumianw",VX(4, 1420), VX_MASK, PPCSPE, { RS, RA, RB } },
2282{ "evmhessfanw",VX(4, 1411), VX_MASK, PPCSPE, { RS, RA, RB } },
2283{ "evmhessianw",VX(4, 1409), VX_MASK, PPCSPE, { RS, RA, RB } },
2284{ "evmhesmfanw",VX(4, 1419), VX_MASK, PPCSPE, { RS, RA, RB } },
2285{ "evmhesmianw",VX(4, 1417), VX_MASK, PPCSPE, { RS, RA, RB } },
2286{ "evmheusianw",VX(4, 1408), VX_MASK, PPCSPE, { RS, RA, RB } },
2287{ "evmheumianw",VX(4, 1416), VX_MASK, PPCSPE, { RS, RA, RB } },
2288
2289{ "evmhogsmfaa",VX(4, 1327), VX_MASK, PPCSPE, { RS, RA, RB } },
2290{ "evmhogsmiaa",VX(4, 1325), VX_MASK, PPCSPE, { RS, RA, RB } },
2291{ "evmhogumiaa",VX(4, 1324), VX_MASK, PPCSPE, { RS, RA, RB } },
2292{ "evmhegsmfaa",VX(4, 1323), VX_MASK, PPCSPE, { RS, RA, RB } },
2293{ "evmhegsmiaa",VX(4, 1321), VX_MASK, PPCSPE, { RS, RA, RB } },
2294{ "evmhegumiaa",VX(4, 1320), VX_MASK, PPCSPE, { RS, RA, RB } },
2295
2296{ "evmhogsmfan",VX(4, 1455), VX_MASK, PPCSPE, { RS, RA, RB } },
2297{ "evmhogsmian",VX(4, 1453), VX_MASK, PPCSPE, { RS, RA, RB } },
2298{ "evmhogumian",VX(4, 1452), VX_MASK, PPCSPE, { RS, RA, RB } },
2299{ "evmhegsmfan",VX(4, 1451), VX_MASK, PPCSPE, { RS, RA, RB } },
2300{ "evmhegsmian",VX(4, 1449), VX_MASK, PPCSPE, { RS, RA, RB } },
2301{ "evmhegumian",VX(4, 1448), VX_MASK, PPCSPE, { RS, RA, RB } },
2302
2303{ "evmwhssf", VX(4, 1095), VX_MASK, PPCSPE, { RS, RA, RB } },
2304{ "evmwhssfa", VX(4, 1127), VX_MASK, PPCSPE, { RS, RA, RB } },
2305{ "evmwhsmf", VX(4, 1103), VX_MASK, PPCSPE, { RS, RA, RB } },
2306{ "evmwhsmfa", VX(4, 1135), VX_MASK, PPCSPE, { RS, RA, RB } },
2307{ "evmwhsmi", VX(4, 1101), VX_MASK, PPCSPE, { RS, RA, RB } },
2308{ "evmwhsmia", VX(4, 1133), VX_MASK, PPCSPE, { RS, RA, RB } },
2309{ "evmwhumi", VX(4, 1100), VX_MASK, PPCSPE, { RS, RA, RB } },
2310{ "evmwhumia", VX(4, 1132), VX_MASK, PPCSPE, { RS, RA, RB } },
2311
2312{ "evmwlumi", VX(4, 1096), VX_MASK, PPCSPE, { RS, RA, RB } },
2313{ "evmwlumia", VX(4, 1128), VX_MASK, PPCSPE, { RS, RA, RB } },
2314
2315{ "evmwlssiaaw",VX(4, 1345), VX_MASK, PPCSPE, { RS, RA, RB } },
2316{ "evmwlsmiaaw",VX(4, 1353), VX_MASK, PPCSPE, { RS, RA, RB } },
2317{ "evmwlusiaaw",VX(4, 1344), VX_MASK, PPCSPE, { RS, RA, RB } },
2318{ "evmwlumiaaw",VX(4, 1352), VX_MASK, PPCSPE, { RS, RA, RB } },
2319
2320{ "evmwlssianw",VX(4, 1473), VX_MASK, PPCSPE, { RS, RA, RB } },
2321{ "evmwlsmianw",VX(4, 1481), VX_MASK, PPCSPE, { RS, RA, RB } },
2322{ "evmwlusianw",VX(4, 1472), VX_MASK, PPCSPE, { RS, RA, RB } },
2323{ "evmwlumianw",VX(4, 1480), VX_MASK, PPCSPE, { RS, RA, RB } },
2324
2325{ "evmwssf", VX(4, 1107), VX_MASK, PPCSPE, { RS, RA, RB } },
2326{ "evmwssfa", VX(4, 1139), VX_MASK, PPCSPE, { RS, RA, RB } },
2327{ "evmwsmf", VX(4, 1115), VX_MASK, PPCSPE, { RS, RA, RB } },
2328{ "evmwsmfa", VX(4, 1147), VX_MASK, PPCSPE, { RS, RA, RB } },
2329{ "evmwsmi", VX(4, 1113), VX_MASK, PPCSPE, { RS, RA, RB } },
2330{ "evmwsmia", VX(4, 1145), VX_MASK, PPCSPE, { RS, RA, RB } },
2331{ "evmwumi", VX(4, 1112), VX_MASK, PPCSPE, { RS, RA, RB } },
2332{ "evmwumia", VX(4, 1144), VX_MASK, PPCSPE, { RS, RA, RB } },
2333
2334{ "evmwssfaa", VX(4, 1363), VX_MASK, PPCSPE, { RS, RA, RB } },
2335{ "evmwsmfaa", VX(4, 1371), VX_MASK, PPCSPE, { RS, RA, RB } },
2336{ "evmwsmiaa", VX(4, 1369), VX_MASK, PPCSPE, { RS, RA, RB } },
2337{ "evmwumiaa", VX(4, 1368), VX_MASK, PPCSPE, { RS, RA, RB } },
2338
2339{ "evmwssfan", VX(4, 1491), VX_MASK, PPCSPE, { RS, RA, RB } },
2340{ "evmwsmfan", VX(4, 1499), VX_MASK, PPCSPE, { RS, RA, RB } },
2341{ "evmwsmian", VX(4, 1497), VX_MASK, PPCSPE, { RS, RA, RB } },
2342{ "evmwumian", VX(4, 1496), VX_MASK, PPCSPE, { RS, RA, RB } },
2343
2344{ "evaddssiaaw",VX(4, 1217), VX_MASK, PPCSPE, { RS, RA } },
2345{ "evaddsmiaaw",VX(4, 1225), VX_MASK, PPCSPE, { RS, RA } },
2346{ "evaddusiaaw",VX(4, 1216), VX_MASK, PPCSPE, { RS, RA } },
2347{ "evaddumiaaw",VX(4, 1224), VX_MASK, PPCSPE, { RS, RA } },
2348
2349{ "evsubfssiaaw",VX(4, 1219), VX_MASK, PPCSPE, { RS, RA } },
2350{ "evsubfsmiaaw",VX(4, 1227), VX_MASK, PPCSPE, { RS, RA } },
2351{ "evsubfusiaaw",VX(4, 1218), VX_MASK, PPCSPE, { RS, RA } },
2352{ "evsubfumiaaw",VX(4, 1226), VX_MASK, PPCSPE, { RS, RA } },
2353
2354{ "evmra", VX(4, 1220), VX_MASK, PPCSPE, { RS, RA } },
2355
2356{ "evdivws", VX(4, 1222), VX_MASK, PPCSPE, { RS, RA, RB } },
2357{ "evdivwu", VX(4, 1223), VX_MASK, PPCSPE, { RS, RA, RB } },
2358
2359{ "mulli", OP(7), OP_MASK, PPCCOM, { RT, RA, SI } },
2360{ "muli", OP(7), OP_MASK, PWRCOM, { RT, RA, SI } },
2361
2362{ "subfic", OP(8), OP_MASK, PPCCOM, { RT, RA, SI } },
2363{ "sfi", OP(8), OP_MASK, PWRCOM, { RT, RA, SI } },
2364
2365{ "dozi", OP(9), OP_MASK, M601, { RT, RA, SI } },
2366
2367{ "bce", B(9,0,0), B_MASK, BOOKE64, { BO, BI, BD } },
2368{ "bcel", B(9,0,1), B_MASK, BOOKE64, { BO, BI, BD } },
2369{ "bcea", B(9,1,0), B_MASK, BOOKE64, { BO, BI, BDA } },
2370{ "bcela", B(9,1,1), B_MASK, BOOKE64, { BO, BI, BDA } },
2371
2372{ "cmplwi", OPL(10,0), OPL_MASK, PPCCOM, { OBF, RA, UI } },
2373{ "cmpldi", OPL(10,1), OPL_MASK, PPC64, { OBF, RA, UI } },
2374{ "cmpli", OP(10), OP_MASK, PPC, { BF, L, RA, UI } },
2375{ "cmpli", OP(10), OP_MASK, PWRCOM, { BF, RA, UI } },
2376
2377{ "cmpwi", OPL(11,0), OPL_MASK, PPCCOM, { OBF, RA, SI } },
2378{ "cmpdi", OPL(11,1), OPL_MASK, PPC64, { OBF, RA, SI } },
2379{ "cmpi", OP(11), OP_MASK, PPC, { BF, L, RA, SI } },
2380{ "cmpi", OP(11), OP_MASK, PWRCOM, { BF, RA, SI } },
2381
2382{ "addic", OP(12), OP_MASK, PPCCOM, { RT, RA, SI } },
2383{ "ai", OP(12), OP_MASK, PWRCOM, { RT, RA, SI } },
2384{ "subic", OP(12), OP_MASK, PPCCOM, { RT, RA, NSI } },
2385
2386{ "addic.", OP(13), OP_MASK, PPCCOM, { RT, RA, SI } },
2387{ "ai.", OP(13), OP_MASK, PWRCOM, { RT, RA, SI } },
2388{ "subic.", OP(13), OP_MASK, PPCCOM, { RT, RA, NSI } },
2389
2390{ "li", OP(14), DRA_MASK, PPCCOM, { RT, SI } },
2391{ "lil", OP(14), DRA_MASK, PWRCOM, { RT, SI } },
2392{ "addi", OP(14), OP_MASK, PPCCOM, { RT, RA, SI } },
2393{ "cal", OP(14), OP_MASK, PWRCOM, { RT, D, RA } },
2394{ "subi", OP(14), OP_MASK, PPCCOM, { RT, RA, NSI } },
2395{ "la", OP(14), OP_MASK, PPCCOM, { RT, D, RA } },
2396
2397{ "lis", OP(15), DRA_MASK, PPCCOM, { RT, SISIGNOPT } },
2398{ "liu", OP(15), DRA_MASK, PWRCOM, { RT, SISIGNOPT } },
2399{ "addis", OP(15), OP_MASK, PPCCOM, { RT,RA,SISIGNOPT } },
2400{ "cau", OP(15), OP_MASK, PWRCOM, { RT,RA,SISIGNOPT } },
2401{ "subis", OP(15), OP_MASK, PPCCOM, { RT, RA, NSI } },
2402
2403{ "bdnz-", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } },
2404{ "bdnz+", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } },
2405{ "bdnz", BBO(16,BODNZ,0,0), BBOATBI_MASK, PPCCOM, { BD } },
2406{ "bdn", BBO(16,BODNZ,0,0), BBOATBI_MASK, PWRCOM, { BD } },
2407{ "bdnzl-", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } },
2408{ "bdnzl+", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } },
2409{ "bdnzl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PPCCOM, { BD } },
2410{ "bdnl", BBO(16,BODNZ,0,1), BBOATBI_MASK, PWRCOM, { BD } },
2411{ "bdnza-", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } },
2412{ "bdnza+", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } },
2413{ "bdnza", BBO(16,BODNZ,1,0), BBOATBI_MASK, PPCCOM, { BDA } },
2414{ "bdna", BBO(16,BODNZ,1,0), BBOATBI_MASK, PWRCOM, { BDA } },
2415{ "bdnzla-", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } },
2416{ "bdnzla+", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } },
2417{ "bdnzla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PPCCOM, { BDA } },
2418{ "bdnla", BBO(16,BODNZ,1,1), BBOATBI_MASK, PWRCOM, { BDA } },
2419{ "bdz-", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDM } },
2420{ "bdz+", BBO(16,BODZ,0,0), BBOATBI_MASK, PPCCOM, { BDP } },
2421{ "bdz", BBO(16,BODZ,0,0), BBOATBI_MASK, COM, { BD } },
2422{ "bdzl-", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDM } },
2423{ "bdzl+", BBO(16,BODZ,0,1), BBOATBI_MASK, PPCCOM, { BDP } },
2424{ "bdzl", BBO(16,BODZ,0,1), BBOATBI_MASK, COM, { BD } },
2425{ "bdza-", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDMA } },
2426{ "bdza+", BBO(16,BODZ,1,0), BBOATBI_MASK, PPCCOM, { BDPA } },
2427{ "bdza", BBO(16,BODZ,1,0), BBOATBI_MASK, COM, { BDA } },
2428{ "bdzla-", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDMA } },
2429{ "bdzla+", BBO(16,BODZ,1,1), BBOATBI_MASK, PPCCOM, { BDPA } },
2430{ "bdzla", BBO(16,BODZ,1,1), BBOATBI_MASK, COM, { BDA } },
2431{ "blt-", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2432{ "blt+", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2433{ "blt", BBOCB(16,BOT,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2434{ "bltl-", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2435{ "bltl+", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2436{ "bltl", BBOCB(16,BOT,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2437{ "blta-", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2438{ "blta+", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2439{ "blta", BBOCB(16,BOT,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2440{ "bltla-", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2441{ "bltla+", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2442{ "bltla", BBOCB(16,BOT,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2443{ "bgt-", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2444{ "bgt+", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2445{ "bgt", BBOCB(16,BOT,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2446{ "bgtl-", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2447{ "bgtl+", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2448{ "bgtl", BBOCB(16,BOT,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2449{ "bgta-", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2450{ "bgta+", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2451{ "bgta", BBOCB(16,BOT,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2452{ "bgtla-", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2453{ "bgtla+", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2454{ "bgtla", BBOCB(16,BOT,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2455{ "beq-", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2456{ "beq+", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2457{ "beq", BBOCB(16,BOT,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } },
2458{ "beql-", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2459{ "beql+", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2460{ "beql", BBOCB(16,BOT,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } },
2461{ "beqa-", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2462{ "beqa+", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2463{ "beqa", BBOCB(16,BOT,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2464{ "beqla-", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2465{ "beqla+", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2466{ "beqla", BBOCB(16,BOT,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2467{ "bso-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2468{ "bso+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2469{ "bso", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } },
2470{ "bsol-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2471{ "bsol+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2472{ "bsol", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } },
2473{ "bsoa-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2474{ "bsoa+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2475{ "bsoa", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2476{ "bsola-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2477{ "bsola+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2478{ "bsola", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2479{ "bun-", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2480{ "bun+", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2481{ "bun", BBOCB(16,BOT,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } },
2482{ "bunl-", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2483{ "bunl+", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2484{ "bunl", BBOCB(16,BOT,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } },
2485{ "buna-", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2486{ "buna+", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2487{ "buna", BBOCB(16,BOT,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } },
2488{ "bunla-", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2489{ "bunla+", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2490{ "bunla", BBOCB(16,BOT,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } },
2491{ "bge-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2492{ "bge+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2493{ "bge", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2494{ "bgel-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2495{ "bgel+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2496{ "bgel", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2497{ "bgea-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2498{ "bgea+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2499{ "bgea", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2500{ "bgela-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2501{ "bgela+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2502{ "bgela", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2503{ "bnl-", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2504{ "bnl+", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2505{ "bnl", BBOCB(16,BOF,CBLT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2506{ "bnll-", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2507{ "bnll+", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2508{ "bnll", BBOCB(16,BOF,CBLT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2509{ "bnla-", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2510{ "bnla+", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2511{ "bnla", BBOCB(16,BOF,CBLT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2512{ "bnlla-", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2513{ "bnlla+", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2514{ "bnlla", BBOCB(16,BOF,CBLT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2515{ "ble-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2516{ "ble+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2517{ "ble", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2518{ "blel-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2519{ "blel+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2520{ "blel", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2521{ "blea-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2522{ "blea+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2523{ "blea", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2524{ "blela-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2525{ "blela+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2526{ "blela", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2527{ "bng-", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2528{ "bng+", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2529{ "bng", BBOCB(16,BOF,CBGT,0,0), BBOATCB_MASK, COM, { CR, BD } },
2530{ "bngl-", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2531{ "bngl+", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2532{ "bngl", BBOCB(16,BOF,CBGT,0,1), BBOATCB_MASK, COM, { CR, BD } },
2533{ "bnga-", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2534{ "bnga+", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2535{ "bnga", BBOCB(16,BOF,CBGT,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2536{ "bngla-", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2537{ "bngla+", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2538{ "bngla", BBOCB(16,BOF,CBGT,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2539{ "bne-", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2540{ "bne+", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2541{ "bne", BBOCB(16,BOF,CBEQ,0,0), BBOATCB_MASK, COM, { CR, BD } },
2542{ "bnel-", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2543{ "bnel+", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2544{ "bnel", BBOCB(16,BOF,CBEQ,0,1), BBOATCB_MASK, COM, { CR, BD } },
2545{ "bnea-", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2546{ "bnea+", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2547{ "bnea", BBOCB(16,BOF,CBEQ,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2548{ "bnela-", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2549{ "bnela+", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2550{ "bnela", BBOCB(16,BOF,CBEQ,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2551{ "bns-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2552{ "bns+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2553{ "bns", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, COM, { CR, BD } },
2554{ "bnsl-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2555{ "bnsl+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2556{ "bnsl", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, COM, { CR, BD } },
2557{ "bnsa-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2558{ "bnsa+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2559{ "bnsa", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, COM, { CR, BDA } },
2560{ "bnsla-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2561{ "bnsla+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2562{ "bnsla", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, COM, { CR, BDA } },
2563{ "bnu-", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2564{ "bnu+", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2565{ "bnu", BBOCB(16,BOF,CBSO,0,0), BBOATCB_MASK, PPCCOM, { CR, BD } },
2566{ "bnul-", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDM } },
2567{ "bnul+", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BDP } },
2568{ "bnul", BBOCB(16,BOF,CBSO,0,1), BBOATCB_MASK, PPCCOM, { CR, BD } },
2569{ "bnua-", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2570{ "bnua+", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2571{ "bnua", BBOCB(16,BOF,CBSO,1,0), BBOATCB_MASK, PPCCOM, { CR, BDA } },
2572{ "bnula-", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDMA } },
2573{ "bnula+", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDPA } },
2574{ "bnula", BBOCB(16,BOF,CBSO,1,1), BBOATCB_MASK, PPCCOM, { CR, BDA } },
2575{ "bdnzt-", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
2576{ "bdnzt+", BBO(16,BODNZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
2577{ "bdnzt", BBO(16,BODNZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
2578{ "bdnztl-", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
2579{ "bdnztl+", BBO(16,BODNZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
2580{ "bdnztl", BBO(16,BODNZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
2581{ "bdnzta-", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2582{ "bdnzta+", BBO(16,BODNZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2583{ "bdnzta", BBO(16,BODNZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
2584{ "bdnztla-",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2585{ "bdnztla+",BBO(16,BODNZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2586{ "bdnztla", BBO(16,BODNZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
2587{ "bdnzf-", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
2588{ "bdnzf+", BBO(16,BODNZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
2589{ "bdnzf", BBO(16,BODNZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
2590{ "bdnzfl-", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
2591{ "bdnzfl+", BBO(16,BODNZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
2592{ "bdnzfl", BBO(16,BODNZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
2593{ "bdnzfa-", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2594{ "bdnzfa+", BBO(16,BODNZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2595{ "bdnzfa", BBO(16,BODNZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
2596{ "bdnzfla-",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2597{ "bdnzfla+",BBO(16,BODNZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2598{ "bdnzfla", BBO(16,BODNZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
2599{ "bt-", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } },
2600{ "bt+", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } },
2601{ "bt", BBO(16,BOT,0,0), BBOAT_MASK, PPCCOM, { BI, BD } },
2602{ "bbt", BBO(16,BOT,0,0), BBOAT_MASK, PWRCOM, { BI, BD } },
2603{ "btl-", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } },
2604{ "btl+", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } },
2605{ "btl", BBO(16,BOT,0,1), BBOAT_MASK, PPCCOM, { BI, BD } },
2606{ "bbtl", BBO(16,BOT,0,1), BBOAT_MASK, PWRCOM, { BI, BD } },
2607{ "bta-", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } },
2608{ "bta+", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } },
2609{ "bta", BBO(16,BOT,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } },
2610{ "bbta", BBO(16,BOT,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } },
2611{ "btla-", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } },
2612{ "btla+", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } },
2613{ "btla", BBO(16,BOT,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } },
2614{ "bbtla", BBO(16,BOT,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } },
2615{ "bf-", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDM } },
2616{ "bf+", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BDP } },
2617{ "bf", BBO(16,BOF,0,0), BBOAT_MASK, PPCCOM, { BI, BD } },
2618{ "bbf", BBO(16,BOF,0,0), BBOAT_MASK, PWRCOM, { BI, BD } },
2619{ "bfl-", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDM } },
2620{ "bfl+", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BDP } },
2621{ "bfl", BBO(16,BOF,0,1), BBOAT_MASK, PPCCOM, { BI, BD } },
2622{ "bbfl", BBO(16,BOF,0,1), BBOAT_MASK, PWRCOM, { BI, BD } },
2623{ "bfa-", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDMA } },
2624{ "bfa+", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDPA } },
2625{ "bfa", BBO(16,BOF,1,0), BBOAT_MASK, PPCCOM, { BI, BDA } },
2626{ "bbfa", BBO(16,BOF,1,0), BBOAT_MASK, PWRCOM, { BI, BDA } },
2627{ "bfla-", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDMA } },
2628{ "bfla+", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDPA } },
2629{ "bfla", BBO(16,BOF,1,1), BBOAT_MASK, PPCCOM, { BI, BDA } },
2630{ "bbfla", BBO(16,BOF,1,1), BBOAT_MASK, PWRCOM, { BI, BDA } },
2631{ "bdzt-", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
2632{ "bdzt+", BBO(16,BODZT,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
2633{ "bdzt", BBO(16,BODZT,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
2634{ "bdztl-", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
2635{ "bdztl+", BBO(16,BODZT,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
2636{ "bdztl", BBO(16,BODZT,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
2637{ "bdzta-", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2638{ "bdzta+", BBO(16,BODZT,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2639{ "bdzta", BBO(16,BODZT,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
2640{ "bdztla-", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2641{ "bdztla+", BBO(16,BODZT,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2642{ "bdztla", BBO(16,BODZT,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
2643{ "bdzf-", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDM } },
2644{ "bdzf+", BBO(16,BODZF,0,0), BBOY_MASK, NOPOWER4, { BI, BDP } },
2645{ "bdzf", BBO(16,BODZF,0,0), BBOY_MASK, PPCCOM, { BI, BD } },
2646{ "bdzfl-", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDM } },
2647{ "bdzfl+", BBO(16,BODZF,0,1), BBOY_MASK, NOPOWER4, { BI, BDP } },
2648{ "bdzfl", BBO(16,BODZF,0,1), BBOY_MASK, PPCCOM, { BI, BD } },
2649{ "bdzfa-", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2650{ "bdzfa+", BBO(16,BODZF,1,0), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2651{ "bdzfa", BBO(16,BODZF,1,0), BBOY_MASK, PPCCOM, { BI, BDA } },
2652{ "bdzfla-", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDMA } },
2653{ "bdzfla+", BBO(16,BODZF,1,1), BBOY_MASK, NOPOWER4, { BI, BDPA } },
2654{ "bdzfla", BBO(16,BODZF,1,1), BBOY_MASK, PPCCOM, { BI, BDA } },
2655{ "bc-", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDM } },
2656{ "bc+", B(16,0,0), B_MASK, PPCCOM, { BOE, BI, BDP } },
2657{ "bc", B(16,0,0), B_MASK, COM, { BO, BI, BD } },
2658{ "bcl-", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDM } },
2659{ "bcl+", B(16,0,1), B_MASK, PPCCOM, { BOE, BI, BDP } },
2660{ "bcl", B(16,0,1), B_MASK, COM, { BO, BI, BD } },
2661{ "bca-", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDMA } },
2662{ "bca+", B(16,1,0), B_MASK, PPCCOM, { BOE, BI, BDPA } },
2663{ "bca", B(16,1,0), B_MASK, COM, { BO, BI, BDA } },
2664{ "bcla-", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDMA } },
2665{ "bcla+", B(16,1,1), B_MASK, PPCCOM, { BOE, BI, BDPA } },
2666{ "bcla", B(16,1,1), B_MASK, COM, { BO, BI, BDA } },
2667
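/* Primary opcode 17: system-call instructions (PowerPC "sc"; POWER "svc").  */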
2668{ "sc", SC(17,1,0), 0xffffffff, PPC, { 0 } },
2669{ "svc", SC(17,0,0), SC_MASK, POWER, { LEV, FL1, FL2 } },
2670{ "svcl", SC(17,0,1), SC_MASK, POWER, { LEV, FL1, FL2 } },
2671{ "svca", SC(17,1,0), SC_MASK, PWRCOM, { SV } },
2672{ "svcla", SC(17,1,1), SC_MASK, POWER, { SV } },
2673
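/* Primary opcode 18: unconditional branch.  */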
2674{ "b", B(18,0,0), B_MASK, COM, { LI } },
2675{ "bl", B(18,0,1), B_MASK, COM, { LI } },
2676{ "ba", B(18,1,0), B_MASK, COM, { LIA } },
2677{ "bla", B(18,1,1), B_MASK, COM, { LIA } },
2678
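/* Primary opcode 19 (XL form): condition-register logical operations,
   branches through LR/CTR, and return-from-interrupt instructions.  */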
2679{ "mcrf", XL(19,0), XLBB_MASK|(3 << 21)|(3 << 16), COM, { BF, BFA } },
2680
2681{ "blr", XLO(19,BOU,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
2682{ "br", XLO(19,BOU,16,0), XLBOBIBB_MASK, PWRCOM, { 0 } },
2683{ "blrl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
2684{ "brl", XLO(19,BOU,16,1), XLBOBIBB_MASK, PWRCOM, { 0 } },
2685{ "bdnzlr", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
2686{ "bdnzlr-", XLO(19,BODNZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2687{ "bdnzlr-", XLO(19,BODNZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
2688{ "bdnzlr+", XLO(19,BODNZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2689{ "bdnzlr+", XLO(19,BODNZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
2690{ "bdnzlrl", XLO(19,BODNZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
2691{ "bdnzlrl-",XLO(19,BODNZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2692{ "bdnzlrl-",XLO(19,BODNZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
2693{ "bdnzlrl+",XLO(19,BODNZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2694{ "bdnzlrl+",XLO(19,BODNZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
2695{ "bdzlr", XLO(19,BODZ,16,0), XLBOBIBB_MASK, PPCCOM, { 0 } },
2696{ "bdzlr-", XLO(19,BODZ,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2697{ "bdzlr-", XLO(19,BODZM4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
2698{ "bdzlr+", XLO(19,BODZP,16,0), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2699{ "bdzlr+", XLO(19,BODZP4,16,0), XLBOBIBB_MASK, POWER4, { 0 } },
2700{ "bdzlrl", XLO(19,BODZ,16,1), XLBOBIBB_MASK, PPCCOM, { 0 } },
2701{ "bdzlrl-", XLO(19,BODZ,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2702{ "bdzlrl-", XLO(19,BODZM4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
2703{ "bdzlrl+", XLO(19,BODZP,16,1), XLBOBIBB_MASK, NOPOWER4, { 0 } },
2704{ "bdzlrl+", XLO(19,BODZP4,16,1), XLBOBIBB_MASK, POWER4, { 0 } },
2705{ "bltlr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2706{ "bltlr-", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2707{ "bltlr-", XLOCB(19,BOTM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2708{ "bltlr+", XLOCB(19,BOTP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2709{ "bltlr+", XLOCB(19,BOTP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2710{ "bltr", XLOCB(19,BOT,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2711{ "bltlrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2712{ "bltlrl-", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2713{ "bltlrl-", XLOCB(19,BOTM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2714{ "bltlrl+", XLOCB(19,BOTP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2715{ "bltlrl+", XLOCB(19,BOTP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2716{ "bltrl", XLOCB(19,BOT,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2717{ "bgtlr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2718{ "bgtlr-", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2719{ "bgtlr-", XLOCB(19,BOTM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2720{ "bgtlr+", XLOCB(19,BOTP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2721{ "bgtlr+", XLOCB(19,BOTP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2722{ "bgtr", XLOCB(19,BOT,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2723{ "bgtlrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2724{ "bgtlrl-", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2725{ "bgtlrl-", XLOCB(19,BOTM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2726{ "bgtlrl+", XLOCB(19,BOTP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2727{ "bgtlrl+", XLOCB(19,BOTP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2728{ "bgtrl", XLOCB(19,BOT,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2729{ "beqlr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2730{ "beqlr-", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2731{ "beqlr-", XLOCB(19,BOTM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2732{ "beqlr+", XLOCB(19,BOTP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2733{ "beqlr+", XLOCB(19,BOTP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2734{ "beqr", XLOCB(19,BOT,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2735{ "beqlrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2736{ "beqlrl-", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2737{ "beqlrl-", XLOCB(19,BOTM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2738{ "beqlrl+", XLOCB(19,BOTP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2739{ "beqlrl+", XLOCB(19,BOTP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2740{ "beqrl", XLOCB(19,BOT,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2741{ "bsolr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2742{ "bsolr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2743{ "bsolr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2744{ "bsolr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2745{ "bsolr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2746{ "bsor", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2747{ "bsolrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2748{ "bsolrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2749{ "bsolrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2750{ "bsolrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2751{ "bsolrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2752{ "bsorl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2753{ "bunlr", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2754{ "bunlr-", XLOCB(19,BOT,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2755{ "bunlr-", XLOCB(19,BOTM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2756{ "bunlr+", XLOCB(19,BOTP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2757{ "bunlr+", XLOCB(19,BOTP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2758{ "bunlrl", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2759{ "bunlrl-", XLOCB(19,BOT,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2760{ "bunlrl-", XLOCB(19,BOTM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2761{ "bunlrl+", XLOCB(19,BOTP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2762{ "bunlrl+", XLOCB(19,BOTP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2763{ "bgelr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2764{ "bgelr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2765{ "bgelr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2766{ "bgelr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2767{ "bgelr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2768{ "bger", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2769{ "bgelrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2770{ "bgelrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2771{ "bgelrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2772{ "bgelrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2773{ "bgelrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2774{ "bgerl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2775{ "bnllr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2776{ "bnllr-", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2777{ "bnllr-", XLOCB(19,BOFM4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2778{ "bnllr+", XLOCB(19,BOFP,CBLT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2779{ "bnllr+", XLOCB(19,BOFP4,CBLT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2780{ "bnlr", XLOCB(19,BOF,CBLT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2781{ "bnllrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2782{ "bnllrl-", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2783{ "bnllrl-", XLOCB(19,BOFM4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2784{ "bnllrl+", XLOCB(19,BOFP,CBLT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2785{ "bnllrl+", XLOCB(19,BOFP4,CBLT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2786{ "bnlrl", XLOCB(19,BOF,CBLT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2787{ "blelr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2788{ "blelr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2789{ "blelr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2790{ "blelr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2791{ "blelr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2792{ "bler", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2793{ "blelrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2794{ "blelrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2795{ "blelrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2796{ "blelrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2797{ "blelrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2798{ "blerl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2799{ "bnglr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2800{ "bnglr-", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2801{ "bnglr-", XLOCB(19,BOFM4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2802{ "bnglr+", XLOCB(19,BOFP,CBGT,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2803{ "bnglr+", XLOCB(19,BOFP4,CBGT,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2804{ "bngr", XLOCB(19,BOF,CBGT,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2805{ "bnglrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2806{ "bnglrl-", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2807{ "bnglrl-", XLOCB(19,BOFM4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2808{ "bnglrl+", XLOCB(19,BOFP,CBGT,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2809{ "bnglrl+", XLOCB(19,BOFP4,CBGT,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2810{ "bngrl", XLOCB(19,BOF,CBGT,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2811{ "bnelr", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2812{ "bnelr-", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2813{ "bnelr-", XLOCB(19,BOFM4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2814{ "bnelr+", XLOCB(19,BOFP,CBEQ,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2815{ "bnelr+", XLOCB(19,BOFP4,CBEQ,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2816{ "bner", XLOCB(19,BOF,CBEQ,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2817{ "bnelrl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2818{ "bnelrl-", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2819{ "bnelrl-", XLOCB(19,BOFM4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2820{ "bnelrl+", XLOCB(19,BOFP,CBEQ,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2821{ "bnelrl+", XLOCB(19,BOFP4,CBEQ,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2822{ "bnerl", XLOCB(19,BOF,CBEQ,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2823{ "bnslr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2824{ "bnslr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2825{ "bnslr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2826{ "bnslr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2827{ "bnslr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2828{ "bnsr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PWRCOM, { CR } },
2829{ "bnslrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2830{ "bnslrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2831{ "bnslrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2832{ "bnslrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2833{ "bnslrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2834{ "bnsrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PWRCOM, { CR } },
2835{ "bnulr", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2836{ "bnulr-", XLOCB(19,BOF,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2837{ "bnulr-", XLOCB(19,BOFM4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2838{ "bnulr+", XLOCB(19,BOFP,CBSO,16,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2839{ "bnulr+", XLOCB(19,BOFP4,CBSO,16,0), XLBOCBBB_MASK, POWER4, { CR } },
2840{ "bnulrl", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2841{ "bnulrl-", XLOCB(19,BOF,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2842{ "bnulrl-", XLOCB(19,BOFM4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2843{ "bnulrl+", XLOCB(19,BOFP,CBSO,16,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2844{ "bnulrl+", XLOCB(19,BOFP4,CBSO,16,1), XLBOCBBB_MASK, POWER4, { CR } },
2845{ "btlr", XLO(19,BOT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2846{ "btlr-", XLO(19,BOT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2847{ "btlr-", XLO(19,BOTM4,16,0), XLBOBB_MASK, POWER4, { BI } },
2848{ "btlr+", XLO(19,BOTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2849{ "btlr+", XLO(19,BOTP4,16,0), XLBOBB_MASK, POWER4, { BI } },
2850{ "bbtr", XLO(19,BOT,16,0), XLBOBB_MASK, PWRCOM, { BI } },
2851{ "btlrl", XLO(19,BOT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2852{ "btlrl-", XLO(19,BOT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2853{ "btlrl-", XLO(19,BOTM4,16,1), XLBOBB_MASK, POWER4, { BI } },
2854{ "btlrl+", XLO(19,BOTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2855{ "btlrl+", XLO(19,BOTP4,16,1), XLBOBB_MASK, POWER4, { BI } },
2856{ "bbtrl", XLO(19,BOT,16,1), XLBOBB_MASK, PWRCOM, { BI } },
2857{ "bflr", XLO(19,BOF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2858{ "bflr-", XLO(19,BOF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2859{ "bflr-", XLO(19,BOFM4,16,0), XLBOBB_MASK, POWER4, { BI } },
2860{ "bflr+", XLO(19,BOFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2861{ "bflr+", XLO(19,BOFP4,16,0), XLBOBB_MASK, POWER4, { BI } },
2862{ "bbfr", XLO(19,BOF,16,0), XLBOBB_MASK, PWRCOM, { BI } },
2863{ "bflrl", XLO(19,BOF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2864{ "bflrl-", XLO(19,BOF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2865{ "bflrl-", XLO(19,BOFM4,16,1), XLBOBB_MASK, POWER4, { BI } },
2866{ "bflrl+", XLO(19,BOFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2867{ "bflrl+", XLO(19,BOFP4,16,1), XLBOBB_MASK, POWER4, { BI } },
2868{ "bbfrl", XLO(19,BOF,16,1), XLBOBB_MASK, PWRCOM, { BI } },
2869{ "bdnztlr", XLO(19,BODNZT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2870{ "bdnztlr-",XLO(19,BODNZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2871{ "bdnztlr+",XLO(19,BODNZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2872{ "bdnztlrl",XLO(19,BODNZT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2873{ "bdnztlrl-",XLO(19,BODNZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2874{ "bdnztlrl+",XLO(19,BODNZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2875{ "bdnzflr", XLO(19,BODNZF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2876{ "bdnzflr-",XLO(19,BODNZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2877{ "bdnzflr+",XLO(19,BODNZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2878{ "bdnzflrl",XLO(19,BODNZF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2879{ "bdnzflrl-",XLO(19,BODNZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2880{ "bdnzflrl+",XLO(19,BODNZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2881{ "bdztlr", XLO(19,BODZT,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2882{ "bdztlr-", XLO(19,BODZT,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2883{ "bdztlr+", XLO(19,BODZTP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2884{ "bdztlrl", XLO(19,BODZT,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2885{ "bdztlrl-",XLO(19,BODZT,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2886{ "bdztlrl+",XLO(19,BODZTP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2887{ "bdzflr", XLO(19,BODZF,16,0), XLBOBB_MASK, PPCCOM, { BI } },
2888{ "bdzflr-", XLO(19,BODZF,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2889{ "bdzflr+", XLO(19,BODZFP,16,0), XLBOBB_MASK, NOPOWER4, { BI } },
2890{ "bdzflrl", XLO(19,BODZF,16,1), XLBOBB_MASK, PPCCOM, { BI } },
2891{ "bdzflrl-",XLO(19,BODZF,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2892{ "bdzflrl+",XLO(19,BODZFP,16,1), XLBOBB_MASK, NOPOWER4, { BI } },
2893{ "bclr", XLLK(19,16,0), XLYBB_MASK, PPCCOM, { BO, BI } },
2894{ "bclrl", XLLK(19,16,1), XLYBB_MASK, PPCCOM, { BO, BI } },
2895{ "bclr+", XLYLK(19,16,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
2896{ "bclrl+", XLYLK(19,16,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
2897{ "bclr-", XLYLK(19,16,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
2898{ "bclrl-", XLYLK(19,16,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
2899{ "bcr", XLLK(19,16,0), XLBB_MASK, PWRCOM, { BO, BI } },
2900{ "bcrl", XLLK(19,16,1), XLBB_MASK, PWRCOM, { BO, BI } },
2901{ "bclre", XLLK(19,17,0), XLBB_MASK, BOOKE64, { BO, BI } },
2902{ "bclrel", XLLK(19,17,1), XLBB_MASK, BOOKE64, { BO, BI } },
2903
2904{ "rfid", XL(19,18), 0xffffffff, PPC64, { 0 } },
2905
2906{ "crnot", XL(19,33), XL_MASK, PPCCOM, { BT, BA, BBA } },
2907{ "crnor", XL(19,33), XL_MASK, COM, { BT, BA, BB } },
2908{ "rfmci", X(19,38), 0xffffffff, PPCRFMCI, { 0 } },
2909
2910{ "rfi", XL(19,50), 0xffffffff, COM, { 0 } },
2911{ "rfci", XL(19,51), 0xffffffff, PPC403 | BOOKE, { 0 } },
2912
2913{ "rfsvc", XL(19,82), 0xffffffff, POWER, { 0 } },
2914
2915{ "crandc", XL(19,129), XL_MASK, COM, { BT, BA, BB } },
2916
2917{ "isync", XL(19,150), 0xffffffff, PPCCOM, { 0 } },
2918{ "ics", XL(19,150), 0xffffffff, PWRCOM, { 0 } },
2919
2920{ "crclr", XL(19,193), XL_MASK, PPCCOM, { BT, BAT, BBA } },
2921{ "crxor", XL(19,193), XL_MASK, COM, { BT, BA, BB } },
2922
2923{ "crnand", XL(19,225), XL_MASK, COM, { BT, BA, BB } },
2924
2925{ "crand", XL(19,257), XL_MASK, COM, { BT, BA, BB } },
2926
2927{ "crset", XL(19,289), XL_MASK, PPCCOM, { BT, BAT, BBA } },
2928{ "creqv", XL(19,289), XL_MASK, COM, { BT, BA, BB } },
2929
2930{ "crorc", XL(19,417), XL_MASK, COM, { BT, BA, BB } },
2931
2932{ "crmove", XL(19,449), XL_MASK, PPCCOM, { BT, BA, BBA } },
2933{ "cror", XL(19,449), XL_MASK, COM, { BT, BA, BB } },
2934
2935{ "bctr", XLO(19,BOU,528,0), XLBOBIBB_MASK, COM, { 0 } },
2936{ "bctrl", XLO(19,BOU,528,1), XLBOBIBB_MASK, COM, { 0 } },
2937{ "bltctr", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2938{ "bltctr-", XLOCB(19,BOT,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2939{ "bltctr-", XLOCB(19,BOTM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2940{ "bltctr+", XLOCB(19,BOTP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2941{ "bltctr+", XLOCB(19,BOTP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2942{ "bltctrl", XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2943{ "bltctrl-",XLOCB(19,BOT,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2944{ "bltctrl-",XLOCB(19,BOTM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2945{ "bltctrl+",XLOCB(19,BOTP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2946{ "bltctrl+",XLOCB(19,BOTP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2947{ "bgtctr", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2948{ "bgtctr-", XLOCB(19,BOT,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2949{ "bgtctr-", XLOCB(19,BOTM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2950{ "bgtctr+", XLOCB(19,BOTP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2951{ "bgtctr+", XLOCB(19,BOTP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2952{ "bgtctrl", XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2953{ "bgtctrl-",XLOCB(19,BOT,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2954{ "bgtctrl-",XLOCB(19,BOTM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2955{ "bgtctrl+",XLOCB(19,BOTP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2956{ "bgtctrl+",XLOCB(19,BOTP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2957{ "beqctr", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2958{ "beqctr-", XLOCB(19,BOT,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2959{ "beqctr-", XLOCB(19,BOTM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2960{ "beqctr+", XLOCB(19,BOTP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2961{ "beqctr+", XLOCB(19,BOTP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2962{ "beqctrl", XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2963{ "beqctrl-",XLOCB(19,BOT,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2964{ "beqctrl-",XLOCB(19,BOTM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2965{ "beqctrl+",XLOCB(19,BOTP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2966{ "beqctrl+",XLOCB(19,BOTP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2967{ "bsoctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2968{ "bsoctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2969{ "bsoctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2970{ "bsoctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2971{ "bsoctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2972{ "bsoctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2973{ "bsoctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2974{ "bsoctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2975{ "bsoctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2976{ "bsoctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2977{ "bunctr", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2978{ "bunctr-", XLOCB(19,BOT,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2979{ "bunctr-", XLOCB(19,BOTM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2980{ "bunctr+", XLOCB(19,BOTP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2981{ "bunctr+", XLOCB(19,BOTP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2982{ "bunctrl", XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2983{ "bunctrl-",XLOCB(19,BOT,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2984{ "bunctrl-",XLOCB(19,BOTM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2985{ "bunctrl+",XLOCB(19,BOTP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2986{ "bunctrl+",XLOCB(19,BOTP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2987{ "bgectr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2988{ "bgectr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2989{ "bgectr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2990{ "bgectr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2991{ "bgectr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
2992{ "bgectrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
2993{ "bgectrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2994{ "bgectrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2995{ "bgectrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
2996{ "bgectrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
2997{ "bnlctr", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
2998{ "bnlctr-", XLOCB(19,BOF,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
2999{ "bnlctr-", XLOCB(19,BOFM4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3000{ "bnlctr+", XLOCB(19,BOFP,CBLT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3001{ "bnlctr+", XLOCB(19,BOFP4,CBLT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3002{ "bnlctrl", XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3003{ "bnlctrl-",XLOCB(19,BOF,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3004{ "bnlctrl-",XLOCB(19,BOFM4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3005{ "bnlctrl+",XLOCB(19,BOFP,CBLT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3006{ "bnlctrl+",XLOCB(19,BOFP4,CBLT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3007{ "blectr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
3008{ "blectr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3009{ "blectr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3010{ "blectr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3011{ "blectr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3012{ "blectrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3013{ "blectrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3014{ "blectrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3015{ "blectrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3016{ "blectrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3017{ "bngctr", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
3018{ "bngctr-", XLOCB(19,BOF,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3019{ "bngctr-", XLOCB(19,BOFM4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3020{ "bngctr+", XLOCB(19,BOFP,CBGT,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3021{ "bngctr+", XLOCB(19,BOFP4,CBGT,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3022{ "bngctrl", XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3023{ "bngctrl-",XLOCB(19,BOF,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3024{ "bngctrl-",XLOCB(19,BOFM4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3025{ "bngctrl+",XLOCB(19,BOFP,CBGT,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3026{ "bngctrl+",XLOCB(19,BOFP4,CBGT,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3027{ "bnectr", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
3028{ "bnectr-", XLOCB(19,BOF,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3029{ "bnectr-", XLOCB(19,BOFM4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3030{ "bnectr+", XLOCB(19,BOFP,CBEQ,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3031{ "bnectr+", XLOCB(19,BOFP4,CBEQ,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3032{ "bnectrl", XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3033{ "bnectrl-",XLOCB(19,BOF,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3034{ "bnectrl-",XLOCB(19,BOFM4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3035{ "bnectrl+",XLOCB(19,BOFP,CBEQ,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3036{ "bnectrl+",XLOCB(19,BOFP4,CBEQ,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3037{ "bnsctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
3038{ "bnsctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3039{ "bnsctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3040{ "bnsctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3041{ "bnsctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3042{ "bnsctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3043{ "bnsctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3044{ "bnsctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3045{ "bnsctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3046{ "bnsctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3047{ "bnuctr", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, PPCCOM, { CR } },
3048{ "bnuctr-", XLOCB(19,BOF,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3049{ "bnuctr-", XLOCB(19,BOFM4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3050{ "bnuctr+", XLOCB(19,BOFP,CBSO,528,0), XLBOCBBB_MASK, NOPOWER4, { CR } },
3051{ "bnuctr+", XLOCB(19,BOFP4,CBSO,528,0), XLBOCBBB_MASK, POWER4, { CR } },
3052{ "bnuctrl", XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, PPCCOM, { CR } },
3053{ "bnuctrl-",XLOCB(19,BOF,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3054{ "bnuctrl-",XLOCB(19,BOFM4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3055{ "bnuctrl+",XLOCB(19,BOFP,CBSO,528,1), XLBOCBBB_MASK, NOPOWER4, { CR } },
3056{ "bnuctrl+",XLOCB(19,BOFP4,CBSO,528,1), XLBOCBBB_MASK, POWER4, { CR } },
3057{ "btctr", XLO(19,BOT,528,0), XLBOBB_MASK, PPCCOM, { BI } },
3058{ "btctr-", XLO(19,BOT,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
3059{ "btctr-", XLO(19,BOTM4,528,0), XLBOBB_MASK, POWER4, { BI } },
3060{ "btctr+", XLO(19,BOTP,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
3061{ "btctr+", XLO(19,BOTP4,528,0), XLBOBB_MASK, POWER4, { BI } },
3062{ "btctrl", XLO(19,BOT,528,1), XLBOBB_MASK, PPCCOM, { BI } },
3063{ "btctrl-", XLO(19,BOT,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
3064{ "btctrl-", XLO(19,BOTM4,528,1), XLBOBB_MASK, POWER4, { BI } },
3065{ "btctrl+", XLO(19,BOTP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
3066{ "btctrl+", XLO(19,BOTP4,528,1), XLBOBB_MASK, POWER4, { BI } },
3067{ "bfctr", XLO(19,BOF,528,0), XLBOBB_MASK, PPCCOM, { BI } },
3068{ "bfctr-", XLO(19,BOF,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
3069{ "bfctr-", XLO(19,BOFM4,528,0), XLBOBB_MASK, POWER4, { BI } },
3070{ "bfctr+", XLO(19,BOFP,528,0), XLBOBB_MASK, NOPOWER4, { BI } },
3071{ "bfctr+", XLO(19,BOFP4,528,0), XLBOBB_MASK, POWER4, { BI } },
3072{ "bfctrl", XLO(19,BOF,528,1), XLBOBB_MASK, PPCCOM, { BI } },
3073{ "bfctrl-", XLO(19,BOF,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
3074{ "bfctrl-", XLO(19,BOFM4,528,1), XLBOBB_MASK, POWER4, { BI } },
3075{ "bfctrl+", XLO(19,BOFP,528,1), XLBOBB_MASK, NOPOWER4, { BI } },
3076{ "bfctrl+", XLO(19,BOFP4,528,1), XLBOBB_MASK, POWER4, { BI } },
3077{ "bcctr", XLLK(19,528,0), XLYBB_MASK, PPCCOM, { BO, BI } },
3078{ "bcctr-", XLYLK(19,528,0,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
3079{ "bcctr+", XLYLK(19,528,1,0), XLYBB_MASK, PPCCOM, { BOE, BI } },
3080{ "bcctrl", XLLK(19,528,1), XLYBB_MASK, PPCCOM, { BO, BI } },
3081{ "bcctrl-", XLYLK(19,528,0,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
3082{ "bcctrl+", XLYLK(19,528,1,1), XLYBB_MASK, PPCCOM, { BOE, BI } },
3083{ "bcc", XLLK(19,528,0), XLBB_MASK, PWRCOM, { BO, BI } },
3084{ "bccl", XLLK(19,528,1), XLBB_MASK, PWRCOM, { BO, BI } },
3085{ "bcctre", XLLK(19,529,0), XLYBB_MASK, BOOKE64, { BO, BI } },
3086{ "bcctrel", XLLK(19,529,1), XLYBB_MASK, BOOKE64, { BO, BI } },
3087
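/* Primary opcode 20: rotate left word immediate then mask insert.  */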
3088{ "rlwimi", M(20,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
3089{ "rlimi", M(20,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
3090
3091{ "rlwimi.", M(20,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
3092{ "rlimi.", M(20,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
3093
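/* Primary opcodes 21-23: rotate-left-word-and-mask instructions
   (opcode 22 is also reused for the 64-bit BookE "be" branches).  */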
3094{ "rotlwi", MME(21,31,0), MMBME_MASK, PPCCOM, { RA, RS, SH } },
3095{ "clrlwi", MME(21,31,0), MSHME_MASK, PPCCOM, { RA, RS, MB } },
3096{ "rlwinm", M(21,0), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
3097{ "rlinm", M(21,0), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
3098{ "rotlwi.", MME(21,31,1), MMBME_MASK, PPCCOM, { RA,RS,SH } },
3099{ "clrlwi.", MME(21,31,1), MSHME_MASK, PPCCOM, { RA, RS, MB } },
3100{ "rlwinm.", M(21,1), M_MASK, PPCCOM, { RA,RS,SH,MBE,ME } },
3101{ "rlinm.", M(21,1), M_MASK, PWRCOM, { RA,RS,SH,MBE,ME } },
3102
3103{ "rlmi", M(22,0), M_MASK, M601, { RA,RS,RB,MBE,ME } },
3104{ "rlmi.", M(22,1), M_MASK, M601, { RA,RS,RB,MBE,ME } },
3105
3106{ "be", B(22,0,0), B_MASK, BOOKE64, { LI } },
3107{ "bel", B(22,0,1), B_MASK, BOOKE64, { LI } },
3108{ "bea", B(22,1,0), B_MASK, BOOKE64, { LIA } },
3109{ "bela", B(22,1,1), B_MASK, BOOKE64, { LIA } },
3110
3111{ "rotlw", MME(23,31,0), MMBME_MASK, PPCCOM, { RA, RS, RB } },
3112{ "rlwnm", M(23,0), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } },
3113{ "rlnm", M(23,0), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } },
3114{ "rotlw.", MME(23,31,1), MMBME_MASK, PPCCOM, { RA, RS, RB } },
3115{ "rlwnm.", M(23,1), M_MASK, PPCCOM, { RA,RS,RB,MBE,ME } },
3116{ "rlnm.", M(23,1), M_MASK, PWRCOM, { RA,RS,RB,MBE,ME } },
3117
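/* Primary opcodes 24-29: logical operations with an immediate operand
   ("ori" with all-zero operands encodes "nop").  */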
3118{ "nop", OP(24), 0xffffffff, PPCCOM, { 0 } },
3119{ "ori", OP(24), OP_MASK, PPCCOM, { RA, RS, UI } },
3120{ "oril", OP(24), OP_MASK, PWRCOM, { RA, RS, UI } },
3121
3122{ "oris", OP(25), OP_MASK, PPCCOM, { RA, RS, UI } },
3123{ "oriu", OP(25), OP_MASK, PWRCOM, { RA, RS, UI } },
3124
3125{ "xori", OP(26), OP_MASK, PPCCOM, { RA, RS, UI } },
3126{ "xoril", OP(26), OP_MASK, PWRCOM, { RA, RS, UI } },
3127
3128{ "xoris", OP(27), OP_MASK, PPCCOM, { RA, RS, UI } },
3129{ "xoriu", OP(27), OP_MASK, PWRCOM, { RA, RS, UI } },
3130
3131{ "andi.", OP(28), OP_MASK, PPCCOM, { RA, RS, UI } },
3132{ "andil.", OP(28), OP_MASK, PWRCOM, { RA, RS, UI } },
3133
3134{ "andis.", OP(29), OP_MASK, PPCCOM, { RA, RS, UI } },
3135{ "andiu.", OP(29), OP_MASK, PWRCOM, { RA, RS, UI } },
3136
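/* Primary opcode 30 (MD/MDS forms): 64-bit rotate-and-mask instructions.  */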
3137{ "rotldi", MD(30,0,0), MDMB_MASK, PPC64, { RA, RS, SH6 } },
3138{ "clrldi", MD(30,0,0), MDSH_MASK, PPC64, { RA, RS, MB6 } },
3139{ "rldicl", MD(30,0,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3140{ "rotldi.", MD(30,0,1), MDMB_MASK, PPC64, { RA, RS, SH6 } },
3141{ "clrldi.", MD(30,0,1), MDSH_MASK, PPC64, { RA, RS, MB6 } },
3142{ "rldicl.", MD(30,0,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3143
3144{ "rldicr", MD(30,1,0), MD_MASK, PPC64, { RA, RS, SH6, ME6 } },
3145{ "rldicr.", MD(30,1,1), MD_MASK, PPC64, { RA, RS, SH6, ME6 } },
3146
3147{ "rldic", MD(30,2,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3148{ "rldic.", MD(30,2,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3149
3150{ "rldimi", MD(30,3,0), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3151{ "rldimi.", MD(30,3,1), MD_MASK, PPC64, { RA, RS, SH6, MB6 } },
3152
3153{ "rotld", MDS(30,8,0), MDSMB_MASK, PPC64, { RA, RS, RB } },
3154{ "rldcl", MDS(30,8,0), MDS_MASK, PPC64, { RA, RS, RB, MB6 } },
3155{ "rotld.", MDS(30,8,1), MDSMB_MASK, PPC64, { RA, RS, RB } },
3156{ "rldcl.", MDS(30,8,1), MDS_MASK, PPC64, { RA, RS, RB, MB6 } },
3157
3158{ "rldcr", MDS(30,9,0), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
3159{ "rldcr.", MDS(30,9,1), MDS_MASK, PPC64, { RA, RS, RB, ME6 } },
3160
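/* Primary opcode 31 (X/XO/XFX forms): register-to-register fixed-point,
   compare, trap, indexed load/store and system instructions, distinguished
   by the extended opcode.  */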
3161{ "cmpw", XCMPL(31,0,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
3162{ "cmpd", XCMPL(31,0,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
3163{ "cmp", X(31,0), XCMP_MASK, PPC, { BF, L, RA, RB } },
3164{ "cmp", X(31,0), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
3165
3166{ "twlgt", XTO(31,4,TOLGT), XTO_MASK, PPCCOM, { RA, RB } },
3167{ "tlgt", XTO(31,4,TOLGT), XTO_MASK, PWRCOM, { RA, RB } },
3168{ "twllt", XTO(31,4,TOLLT), XTO_MASK, PPCCOM, { RA, RB } },
3169{ "tllt", XTO(31,4,TOLLT), XTO_MASK, PWRCOM, { RA, RB } },
3170{ "tweq", XTO(31,4,TOEQ), XTO_MASK, PPCCOM, { RA, RB } },
3171{ "teq", XTO(31,4,TOEQ), XTO_MASK, PWRCOM, { RA, RB } },
3172{ "twlge", XTO(31,4,TOLGE), XTO_MASK, PPCCOM, { RA, RB } },
3173{ "tlge", XTO(31,4,TOLGE), XTO_MASK, PWRCOM, { RA, RB } },
3174{ "twlnl", XTO(31,4,TOLNL), XTO_MASK, PPCCOM, { RA, RB } },
3175{ "tlnl", XTO(31,4,TOLNL), XTO_MASK, PWRCOM, { RA, RB } },
3176{ "twlle", XTO(31,4,TOLLE), XTO_MASK, PPCCOM, { RA, RB } },
3177{ "tlle", XTO(31,4,TOLLE), XTO_MASK, PWRCOM, { RA, RB } },
3178{ "twlng", XTO(31,4,TOLNG), XTO_MASK, PPCCOM, { RA, RB } },
3179{ "tlng", XTO(31,4,TOLNG), XTO_MASK, PWRCOM, { RA, RB } },
3180{ "twgt", XTO(31,4,TOGT), XTO_MASK, PPCCOM, { RA, RB } },
3181{ "tgt", XTO(31,4,TOGT), XTO_MASK, PWRCOM, { RA, RB } },
3182{ "twge", XTO(31,4,TOGE), XTO_MASK, PPCCOM, { RA, RB } },
3183{ "tge", XTO(31,4,TOGE), XTO_MASK, PWRCOM, { RA, RB } },
3184{ "twnl", XTO(31,4,TONL), XTO_MASK, PPCCOM, { RA, RB } },
3185{ "tnl", XTO(31,4,TONL), XTO_MASK, PWRCOM, { RA, RB } },
3186{ "twlt", XTO(31,4,TOLT), XTO_MASK, PPCCOM, { RA, RB } },
3187{ "tlt", XTO(31,4,TOLT), XTO_MASK, PWRCOM, { RA, RB } },
3188{ "twle", XTO(31,4,TOLE), XTO_MASK, PPCCOM, { RA, RB } },
3189{ "tle", XTO(31,4,TOLE), XTO_MASK, PWRCOM, { RA, RB } },
3190{ "twng", XTO(31,4,TONG), XTO_MASK, PPCCOM, { RA, RB } },
3191{ "tng", XTO(31,4,TONG), XTO_MASK, PWRCOM, { RA, RB } },
3192{ "twne", XTO(31,4,TONE), XTO_MASK, PPCCOM, { RA, RB } },
3193{ "tne", XTO(31,4,TONE), XTO_MASK, PWRCOM, { RA, RB } },
3194{ "trap", XTO(31,4,TOU), 0xffffffff, PPCCOM, { 0 } },
3195{ "tw", X(31,4), X_MASK, PPCCOM, { TO, RA, RB } },
3196{ "t", X(31,4), X_MASK, PWRCOM, { TO, RA, RB } },
3197
3198{ "subfc", XO(31,8,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3199{ "sf", XO(31,8,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3200{ "subc", XO(31,8,0,0), XO_MASK, PPC, { RT, RB, RA } },
3201{ "subfc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3202{ "sf.", XO(31,8,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3203{ "subc.", XO(31,8,0,1), XO_MASK, PPCCOM, { RT, RB, RA } },
3204{ "subfco", XO(31,8,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3205{ "sfo", XO(31,8,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3206{ "subco", XO(31,8,1,0), XO_MASK, PPC, { RT, RB, RA } },
3207{ "subfco.", XO(31,8,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3208{ "sfo.", XO(31,8,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3209{ "subco.", XO(31,8,1,1), XO_MASK, PPC, { RT, RB, RA } },
3210
3211{ "mulhdu", XO(31,9,0,0), XO_MASK, PPC64, { RT, RA, RB } },
3212{ "mulhdu.", XO(31,9,0,1), XO_MASK, PPC64, { RT, RA, RB } },
3213
3214{ "addc", XO(31,10,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3215{ "a", XO(31,10,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3216{ "addc.", XO(31,10,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3217{ "a.", XO(31,10,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3218{ "addco", XO(31,10,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3219{ "ao", XO(31,10,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3220{ "addco.", XO(31,10,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3221{ "ao.", XO(31,10,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3222
3223{ "mulhwu", XO(31,11,0,0), XO_MASK, PPC, { RT, RA, RB } },
3224{ "mulhwu.", XO(31,11,0,1), XO_MASK, PPC, { RT, RA, RB } },
3225
3226{ "isellt", X(31,15), X_MASK, PPCISEL, { RT, RA, RB } },
3227{ "iselgt", X(31,47), X_MASK, PPCISEL, { RT, RA, RB } },
3228{ "iseleq", X(31,79), X_MASK, PPCISEL, { RT, RA, RB } },
3229{ "isel", XISEL(31,15), XISEL_MASK, PPCISEL, { RT, RA, RB, CRB } },
3230
3231{ "mfcr", X(31,19), XRARB_MASK, NOPOWER4, { RT } },
3232{ "mfcr", X(31,19), XFXFXM_MASK, POWER4, { RT, FXM4 } },
3233
3234{ "lwarx", X(31,20), X_MASK, PPC, { RT, RA, RB } },
3235
3236{ "ldx", X(31,21), X_MASK, PPC64, { RT, RA, RB } },
3237
3238{ "icbt", X(31,22), X_MASK, BOOKE, { CT, RA, RB } },
3239{ "icbt", X(31,262), XRT_MASK, PPC403, { RA, RB } },
3240
3241{ "lwzx", X(31,23), X_MASK, PPCCOM, { RT, RA, RB } },
3242{ "lx", X(31,23), X_MASK, PWRCOM, { RT, RA, RB } },
3243
3244{ "slw", XRC(31,24,0), X_MASK, PPCCOM, { RA, RS, RB } },
3245{ "sl", XRC(31,24,0), X_MASK, PWRCOM, { RA, RS, RB } },
3246{ "slw.", XRC(31,24,1), X_MASK, PPCCOM, { RA, RS, RB } },
3247{ "sl.", XRC(31,24,1), X_MASK, PWRCOM, { RA, RS, RB } },
3248
3249{ "cntlzw", XRC(31,26,0), XRB_MASK, PPCCOM, { RA, RS } },
3250{ "cntlz", XRC(31,26,0), XRB_MASK, PWRCOM, { RA, RS } },
3251{ "cntlzw.", XRC(31,26,1), XRB_MASK, PPCCOM, { RA, RS } },
3252{ "cntlz.", XRC(31,26,1), XRB_MASK, PWRCOM, { RA, RS } },
3253
3254{ "sld", XRC(31,27,0), X_MASK, PPC64, { RA, RS, RB } },
3255{ "sld.", XRC(31,27,1), X_MASK, PPC64, { RA, RS, RB } },
3256
3257{ "and", XRC(31,28,0), X_MASK, COM, { RA, RS, RB } },
3258{ "and.", XRC(31,28,1), X_MASK, COM, { RA, RS, RB } },
3259
3260{ "maskg", XRC(31,29,0), X_MASK, M601, { RA, RS, RB } },
3261{ "maskg.", XRC(31,29,1), X_MASK, M601, { RA, RS, RB } },
3262
3263{ "icbte", X(31,30), X_MASK, BOOKE64, { CT, RA, RB } },
3264
3265{ "lwzxe", X(31,31), X_MASK, BOOKE64, { RT, RA, RB } },
3266
3267{ "cmplw", XCMPL(31,32,0), XCMPL_MASK, PPCCOM, { OBF, RA, RB } },
3268{ "cmpld", XCMPL(31,32,1), XCMPL_MASK, PPC64, { OBF, RA, RB } },
3269{ "cmpl", X(31,32), XCMP_MASK, PPC, { BF, L, RA, RB } },
3270{ "cmpl", X(31,32), XCMPL_MASK, PWRCOM, { BF, RA, RB } },
3271
3272{ "subf", XO(31,40,0,0), XO_MASK, PPC, { RT, RA, RB } },
3273{ "sub", XO(31,40,0,0), XO_MASK, PPC, { RT, RB, RA } },
3274{ "subf.", XO(31,40,0,1), XO_MASK, PPC, { RT, RA, RB } },
3275{ "sub.", XO(31,40,0,1), XO_MASK, PPC, { RT, RB, RA } },
3276{ "subfo", XO(31,40,1,0), XO_MASK, PPC, { RT, RA, RB } },
3277{ "subo", XO(31,40,1,0), XO_MASK, PPC, { RT, RB, RA } },
3278{ "subfo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RA, RB } },
3279{ "subo.", XO(31,40,1,1), XO_MASK, PPC, { RT, RB, RA } },
3280
3281{ "ldux", X(31,53), X_MASK, PPC64, { RT, RAL, RB } },
3282
3283{ "dcbst", X(31,54), XRT_MASK, PPC, { RA, RB } },
3284
3285{ "lwzux", X(31,55), X_MASK, PPCCOM, { RT, RAL, RB } },
3286{ "lux", X(31,55), X_MASK, PWRCOM, { RT, RA, RB } },
3287
3288{ "dcbste", X(31,62), XRT_MASK, BOOKE64, { RA, RB } },
3289
3290{ "lwzuxe", X(31,63), X_MASK, BOOKE64, { RT, RAL, RB } },
3291
3292{ "cntlzd", XRC(31,58,0), XRB_MASK, PPC64, { RA, RS } },
3293{ "cntlzd.", XRC(31,58,1), XRB_MASK, PPC64, { RA, RS } },
3294
3295{ "andc", XRC(31,60,0), X_MASK, COM, { RA, RS, RB } },
3296{ "andc.", XRC(31,60,1), X_MASK, COM, { RA, RS, RB } },
3297
3298{ "tdlgt", XTO(31,68,TOLGT), XTO_MASK, PPC64, { RA, RB } },
3299{ "tdllt", XTO(31,68,TOLLT), XTO_MASK, PPC64, { RA, RB } },
3300{ "tdeq", XTO(31,68,TOEQ), XTO_MASK, PPC64, { RA, RB } },
3301{ "tdlge", XTO(31,68,TOLGE), XTO_MASK, PPC64, { RA, RB } },
3302{ "tdlnl", XTO(31,68,TOLNL), XTO_MASK, PPC64, { RA, RB } },
3303{ "tdlle", XTO(31,68,TOLLE), XTO_MASK, PPC64, { RA, RB } },
3304{ "tdlng", XTO(31,68,TOLNG), XTO_MASK, PPC64, { RA, RB } },
3305{ "tdgt", XTO(31,68,TOGT), XTO_MASK, PPC64, { RA, RB } },
3306{ "tdge", XTO(31,68,TOGE), XTO_MASK, PPC64, { RA, RB } },
3307{ "tdnl", XTO(31,68,TONL), XTO_MASK, PPC64, { RA, RB } },
3308{ "tdlt", XTO(31,68,TOLT), XTO_MASK, PPC64, { RA, RB } },
3309{ "tdle", XTO(31,68,TOLE), XTO_MASK, PPC64, { RA, RB } },
3310{ "tdng", XTO(31,68,TONG), XTO_MASK, PPC64, { RA, RB } },
3311{ "tdne", XTO(31,68,TONE), XTO_MASK, PPC64, { RA, RB } },
3312{ "td", X(31,68), X_MASK, PPC64, { TO, RA, RB } },
3313
3314{ "mulhd", XO(31,73,0,0), XO_MASK, PPC64, { RT, RA, RB } },
3315{ "mulhd.", XO(31,73,0,1), XO_MASK, PPC64, { RT, RA, RB } },
3316
3317{ "mulhw", XO(31,75,0,0), XO_MASK, PPC, { RT, RA, RB } },
3318{ "mulhw.", XO(31,75,0,1), XO_MASK, PPC, { RT, RA, RB } },
3319
3320{ "dlmzb", XRC(31,78,0), X_MASK, PPC403|PPC440, { RA, RS, RB } },
3321{ "dlmzb.", XRC(31,78,1), X_MASK, PPC403|PPC440, { RA, RS, RB } },
3322
3323{ "mtsrd", X(31,82), XRB_MASK|(1<<20), PPC64, { SR, RS } },
3324
3325{ "mfmsr", X(31,83), XRARB_MASK, COM, { RT } },
3326
3327{ "ldarx", X(31,84), X_MASK, PPC64, { RT, RA, RB } },
3328
3329{ "dcbf", X(31,86), XRT_MASK, PPC, { RA, RB } },
3330
3331{ "lbzx", X(31,87), X_MASK, COM, { RT, RA, RB } },
3332
3333{ "dcbfe", X(31,94), XRT_MASK, BOOKE64, { RA, RB } },
3334
3335{ "lbzxe", X(31,95), X_MASK, BOOKE64, { RT, RA, RB } },
3336
3337{ "neg", XO(31,104,0,0), XORB_MASK, COM, { RT, RA } },
3338{ "neg.", XO(31,104,0,1), XORB_MASK, COM, { RT, RA } },
3339{ "nego", XO(31,104,1,0), XORB_MASK, COM, { RT, RA } },
3340{ "nego.", XO(31,104,1,1), XORB_MASK, COM, { RT, RA } },
3341
3342{ "mul", XO(31,107,0,0), XO_MASK, M601, { RT, RA, RB } },
3343{ "mul.", XO(31,107,0,1), XO_MASK, M601, { RT, RA, RB } },
3344{ "mulo", XO(31,107,1,0), XO_MASK, M601, { RT, RA, RB } },
3345{ "mulo.", XO(31,107,1,1), XO_MASK, M601, { RT, RA, RB } },
3346
3347{ "mtsrdin", X(31,114), XRA_MASK, PPC64, { RS, RB } },
3348
3349{ "clf", X(31,118), XTO_MASK, POWER, { RA, RB } },
3350
3351{ "lbzux", X(31,119), X_MASK, COM, { RT, RAL, RB } },
3352
3353{ "not", XRC(31,124,0), X_MASK, COM, { RA, RS, RBS } },
3354{ "nor", XRC(31,124,0), X_MASK, COM, { RA, RS, RB } },
3355{ "not.", XRC(31,124,1), X_MASK, COM, { RA, RS, RBS } },
3356{ "nor.", XRC(31,124,1), X_MASK, COM, { RA, RS, RB } },
3357
3358{ "lwarxe", X(31,126), X_MASK, BOOKE64, { RT, RA, RB } },
3359
3360{ "lbzuxe", X(31,127), X_MASK, BOOKE64, { RT, RAL, RB } },
3361
3362{ "wrtee", X(31,131), XRARB_MASK, PPC403 | BOOKE, { RS } },
3363
3364{ "dcbtstls",X(31,134), X_MASK, PPCCHLK, { CT, RA, RB }},
3365
3366{ "subfe", XO(31,136,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3367{ "sfe", XO(31,136,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3368{ "subfe.", XO(31,136,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3369{ "sfe.", XO(31,136,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3370{ "subfeo", XO(31,136,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3371{ "sfeo", XO(31,136,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3372{ "subfeo.", XO(31,136,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3373{ "sfeo.", XO(31,136,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3374
3375{ "adde", XO(31,138,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3376{ "ae", XO(31,138,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3377{ "adde.", XO(31,138,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3378{ "ae.", XO(31,138,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3379{ "addeo", XO(31,138,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3380{ "aeo", XO(31,138,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3381{ "addeo.", XO(31,138,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3382{ "aeo.", XO(31,138,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3383
3384{ "dcbtstlse",X(31,142),X_MASK, PPCCHLK64, { CT, RA, RB }},
3385
3386{ "mtcr", XFXM(31,144,0xff), XRARB_MASK, COM, { RS }},
3387{ "mtcrf", X(31,144), XFXFXM_MASK, COM, { FXM, RS } },
3388
3389{ "mtmsr", X(31,146), XRARB_MASK, COM, { RS } },
3390
3391{ "stdx", X(31,149), X_MASK, PPC64, { RS, RA, RB } },
3392
3393{ "stwcx.", XRC(31,150,1), X_MASK, PPC, { RS, RA, RB } },
3394
3395{ "stwx", X(31,151), X_MASK, PPCCOM, { RS, RA, RB } },
3396{ "stx", X(31,151), X_MASK, PWRCOM, { RS, RA, RB } },
3397
3398{ "stwcxe.", XRC(31,158,1), X_MASK, BOOKE64, { RS, RA, RB } },
3399
3400{ "stwxe", X(31,159), X_MASK, BOOKE64, { RS, RA, RB } },
3401
3402{ "slq", XRC(31,152,0), X_MASK, M601, { RA, RS, RB } },
3403{ "slq.", XRC(31,152,1), X_MASK, M601, { RA, RS, RB } },
3404
3405{ "sle", XRC(31,153,0), X_MASK, M601, { RA, RS, RB } },
3406{ "sle.", XRC(31,153,1), X_MASK, M601, { RA, RS, RB } },
3407
3408{ "wrteei", X(31,163), XE_MASK, PPC403 | BOOKE, { E } },
3409
3410{ "dcbtls", X(31,166), X_MASK, PPCCHLK, { CT, RA, RB }},
3411{ "dcbtlse", X(31,174), X_MASK, PPCCHLK64, { CT, RA, RB }},
3412
3413{ "mtmsrd", X(31,178), XRLARB_MASK, PPC64, { RS, MTMSRD_L } },
3414
3415{ "stdux", X(31,181), X_MASK, PPC64, { RS, RAS, RB } },
3416
3417{ "stwux", X(31,183), X_MASK, PPCCOM, { RS, RAS, RB } },
3418{ "stux", X(31,183), X_MASK, PWRCOM, { RS, RA, RB } },
3419
3420{ "sliq", XRC(31,184,0), X_MASK, M601, { RA, RS, SH } },
3421{ "sliq.", XRC(31,184,1), X_MASK, M601, { RA, RS, SH } },
3422
3423{ "stwuxe", X(31,191), X_MASK, BOOKE64, { RS, RAS, RB } },
3424
3425{ "subfze", XO(31,200,0,0), XORB_MASK, PPCCOM, { RT, RA } },
3426{ "sfze", XO(31,200,0,0), XORB_MASK, PWRCOM, { RT, RA } },
3427{ "subfze.", XO(31,200,0,1), XORB_MASK, PPCCOM, { RT, RA } },
3428{ "sfze.", XO(31,200,0,1), XORB_MASK, PWRCOM, { RT, RA } },
3429{ "subfzeo", XO(31,200,1,0), XORB_MASK, PPCCOM, { RT, RA } },
3430{ "sfzeo", XO(31,200,1,0), XORB_MASK, PWRCOM, { RT, RA } },
3431{ "subfzeo.",XO(31,200,1,1), XORB_MASK, PPCCOM, { RT, RA } },
3432{ "sfzeo.", XO(31,200,1,1), XORB_MASK, PWRCOM, { RT, RA } },
3433
3434{ "addze", XO(31,202,0,0), XORB_MASK, PPCCOM, { RT, RA } },
3435{ "aze", XO(31,202,0,0), XORB_MASK, PWRCOM, { RT, RA } },
3436{ "addze.", XO(31,202,0,1), XORB_MASK, PPCCOM, { RT, RA } },
3437{ "aze.", XO(31,202,0,1), XORB_MASK, PWRCOM, { RT, RA } },
3438{ "addzeo", XO(31,202,1,0), XORB_MASK, PPCCOM, { RT, RA } },
3439{ "azeo", XO(31,202,1,0), XORB_MASK, PWRCOM, { RT, RA } },
3440{ "addzeo.", XO(31,202,1,1), XORB_MASK, PPCCOM, { RT, RA } },
3441{ "azeo.", XO(31,202,1,1), XORB_MASK, PWRCOM, { RT, RA } },
3442
3443{ "mtsr", X(31,210), XRB_MASK|(1<<20), COM32, { SR, RS } },
3444
3445{ "stdcx.", XRC(31,214,1), X_MASK, PPC64, { RS, RA, RB } },
3446
3447{ "stbx", X(31,215), X_MASK, COM, { RS, RA, RB } },
3448
3449{ "sllq", XRC(31,216,0), X_MASK, M601, { RA, RS, RB } },
3450{ "sllq.", XRC(31,216,1), X_MASK, M601, { RA, RS, RB } },
3451
3452{ "sleq", XRC(31,217,0), X_MASK, M601, { RA, RS, RB } },
3453{ "sleq.", XRC(31,217,1), X_MASK, M601, { RA, RS, RB } },
3454
3455{ "stbxe", X(31,223), X_MASK, BOOKE64, { RS, RA, RB } },
3456
3457{ "icblc", X(31,230), X_MASK, PPCCHLK, { CT, RA, RB }},
3458
3459{ "subfme", XO(31,232,0,0), XORB_MASK, PPCCOM, { RT, RA } },
3460{ "sfme", XO(31,232,0,0), XORB_MASK, PWRCOM, { RT, RA } },
3461{ "subfme.", XO(31,232,0,1), XORB_MASK, PPCCOM, { RT, RA } },
3462{ "sfme.", XO(31,232,0,1), XORB_MASK, PWRCOM, { RT, RA } },
3463{ "subfmeo", XO(31,232,1,0), XORB_MASK, PPCCOM, { RT, RA } },
3464{ "sfmeo", XO(31,232,1,0), XORB_MASK, PWRCOM, { RT, RA } },
3465{ "subfmeo.",XO(31,232,1,1), XORB_MASK, PPCCOM, { RT, RA } },
3466{ "sfmeo.", XO(31,232,1,1), XORB_MASK, PWRCOM, { RT, RA } },
3467
3468{ "mulld", XO(31,233,0,0), XO_MASK, PPC64, { RT, RA, RB } },
3469{ "mulld.", XO(31,233,0,1), XO_MASK, PPC64, { RT, RA, RB } },
3470{ "mulldo", XO(31,233,1,0), XO_MASK, PPC64, { RT, RA, RB } },
3471{ "mulldo.", XO(31,233,1,1), XO_MASK, PPC64, { RT, RA, RB } },
3472
3473{ "addme", XO(31,234,0,0), XORB_MASK, PPCCOM, { RT, RA } },
3474{ "ame", XO(31,234,0,0), XORB_MASK, PWRCOM, { RT, RA } },
3475{ "addme.", XO(31,234,0,1), XORB_MASK, PPCCOM, { RT, RA } },
3476{ "ame.", XO(31,234,0,1), XORB_MASK, PWRCOM, { RT, RA } },
3477{ "addmeo", XO(31,234,1,0), XORB_MASK, PPCCOM, { RT, RA } },
3478{ "ameo", XO(31,234,1,0), XORB_MASK, PWRCOM, { RT, RA } },
3479{ "addmeo.", XO(31,234,1,1), XORB_MASK, PPCCOM, { RT, RA } },
3480{ "ameo.", XO(31,234,1,1), XORB_MASK, PWRCOM, { RT, RA } },
3481
3482{ "mullw", XO(31,235,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3483{ "muls", XO(31,235,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3484{ "mullw.", XO(31,235,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3485{ "muls.", XO(31,235,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3486{ "mullwo", XO(31,235,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3487{ "mulso", XO(31,235,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3488{ "mullwo.", XO(31,235,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3489{ "mulso.", XO(31,235,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3490
3491{ "icblce", X(31,238), X_MASK, PPCCHLK64, { CT, RA, RB }},
3492{ "mtsrin", X(31,242), XRA_MASK, PPC32, { RS, RB } },
3493{ "mtsri", X(31,242), XRA_MASK, POWER32, { RS, RB } },
3494
3495{ "dcbtst", X(31,246), XRT_MASK, PPC, { CT, RA, RB } },
3496
3497{ "stbux", X(31,247), X_MASK, COM, { RS, RAS, RB } },
3498
3499{ "slliq", XRC(31,248,0), X_MASK, M601, { RA, RS, SH } },
3500{ "slliq.", XRC(31,248,1), X_MASK, M601, { RA, RS, SH } },
3501
3502{ "dcbtste", X(31,253), X_MASK, BOOKE64, { CT, RA, RB } },
3503
3504{ "stbuxe", X(31,255), X_MASK, BOOKE64, { RS, RAS, RB } },
3505
3506{ "mfdcrx", X(31,259), X_MASK, BOOKE, { RS, RA } },
3507
3508{ "doz", XO(31,264,0,0), XO_MASK, M601, { RT, RA, RB } },
3509{ "doz.", XO(31,264,0,1), XO_MASK, M601, { RT, RA, RB } },
3510{ "dozo", XO(31,264,1,0), XO_MASK, M601, { RT, RA, RB } },
3511{ "dozo.", XO(31,264,1,1), XO_MASK, M601, { RT, RA, RB } },
3512
3513{ "add", XO(31,266,0,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3514{ "cax", XO(31,266,0,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3515{ "add.", XO(31,266,0,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3516{ "cax.", XO(31,266,0,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3517{ "addo", XO(31,266,1,0), XO_MASK, PPCCOM, { RT, RA, RB } },
3518{ "caxo", XO(31,266,1,0), XO_MASK, PWRCOM, { RT, RA, RB } },
3519{ "addo.", XO(31,266,1,1), XO_MASK, PPCCOM, { RT, RA, RB } },
3520{ "caxo.", XO(31,266,1,1), XO_MASK, PWRCOM, { RT, RA, RB } },
3521
3522{ "tlbiel", X(31,274), XRTRA_MASK, POWER4, { RB } },
3523
3524{ "mfapidi", X(31,275), X_MASK, BOOKE, { RT, RA } },
3525
3526{ "lscbx", XRC(31,277,0), X_MASK, M601, { RT, RA, RB } },
3527{ "lscbx.", XRC(31,277,1), X_MASK, M601, { RT, RA, RB } },
3528
3529{ "dcbt", X(31,278), XRT_MASK, PPC, { CT, RA, RB } },
3530
3531{ "lhzx", X(31,279), X_MASK, COM, { RT, RA, RB } },
3532
3533{ "eqv", XRC(31,284,0), X_MASK, COM, { RA, RS, RB } },
3534{ "eqv.", XRC(31,284,1), X_MASK, COM, { RA, RS, RB } },
3535
3536{ "dcbte", X(31,286), X_MASK, BOOKE64, { CT, RA, RB } },
3537
3538{ "lhzxe", X(31,287), X_MASK, BOOKE64, { RT, RA, RB } },
3539
3540{ "tlbie", X(31,306), XRTLRA_MASK, PPC, { RB, L } },
3541{ "tlbi", X(31,306), XRT_MASK, POWER, { RA, RB } },
3542
3543{ "eciwx", X(31,310), X_MASK, PPC, { RT, RA, RB } },
3544
3545{ "lhzux", X(31,311), X_MASK, COM, { RT, RAL, RB } },
3546
3547{ "xor", XRC(31,316,0), X_MASK, COM, { RA, RS, RB } },
3548{ "xor.", XRC(31,316,1), X_MASK, COM, { RA, RS, RB } },
3549
3550{ "lhzuxe", X(31,319), X_MASK, BOOKE64, { RT, RAL, RB } },
3551
3552{ "mfexisr", XSPR(31,323,64), XSPR_MASK, PPC403, { RT } },
3553{ "mfexier", XSPR(31,323,66), XSPR_MASK, PPC403, { RT } },
3554{ "mfbr0", XSPR(31,323,128), XSPR_MASK, PPC403, { RT } },
3555{ "mfbr1", XSPR(31,323,129), XSPR_MASK, PPC403, { RT } },
3556{ "mfbr2", XSPR(31,323,130), XSPR_MASK, PPC403, { RT } },
3557{ "mfbr3", XSPR(31,323,131), XSPR_MASK, PPC403, { RT } },
3558{ "mfbr4", XSPR(31,323,132), XSPR_MASK, PPC403, { RT } },
3559{ "mfbr5", XSPR(31,323,133), XSPR_MASK, PPC403, { RT } },
3560{ "mfbr6", XSPR(31,323,134), XSPR_MASK, PPC403, { RT } },
3561{ "mfbr7", XSPR(31,323,135), XSPR_MASK, PPC403, { RT } },
3562{ "mfbear", XSPR(31,323,144), XSPR_MASK, PPC403, { RT } },
3563{ "mfbesr", XSPR(31,323,145), XSPR_MASK, PPC403, { RT } },
3564{ "mfiocr", XSPR(31,323,160), XSPR_MASK, PPC403, { RT } },
3565{ "mfdmacr0", XSPR(31,323,192), XSPR_MASK, PPC403, { RT } },
3566{ "mfdmact0", XSPR(31,323,193), XSPR_MASK, PPC403, { RT } },
3567{ "mfdmada0", XSPR(31,323,194), XSPR_MASK, PPC403, { RT } },
3568{ "mfdmasa0", XSPR(31,323,195), XSPR_MASK, PPC403, { RT } },
3569{ "mfdmacc0", XSPR(31,323,196), XSPR_MASK, PPC403, { RT } },
3570{ "mfdmacr1", XSPR(31,323,200), XSPR_MASK, PPC403, { RT } },
3571{ "mfdmact1", XSPR(31,323,201), XSPR_MASK, PPC403, { RT } },
3572{ "mfdmada1", XSPR(31,323,202), XSPR_MASK, PPC403, { RT } },
3573{ "mfdmasa1", XSPR(31,323,203), XSPR_MASK, PPC403, { RT } },
3574{ "mfdmacc1", XSPR(31,323,204), XSPR_MASK, PPC403, { RT } },
3575{ "mfdmacr2", XSPR(31,323,208), XSPR_MASK, PPC403, { RT } },
3576{ "mfdmact2", XSPR(31,323,209), XSPR_MASK, PPC403, { RT } },
3577{ "mfdmada2", XSPR(31,323,210), XSPR_MASK, PPC403, { RT } },
3578{ "mfdmasa2", XSPR(31,323,211), XSPR_MASK, PPC403, { RT } },
3579{ "mfdmacc2", XSPR(31,323,212), XSPR_MASK, PPC403, { RT } },
3580{ "mfdmacr3", XSPR(31,323,216), XSPR_MASK, PPC403, { RT } },
3581{ "mfdmact3", XSPR(31,323,217), XSPR_MASK, PPC403, { RT } },
3582{ "mfdmada3", XSPR(31,323,218), XSPR_MASK, PPC403, { RT } },
3583{ "mfdmasa3", XSPR(31,323,219), XSPR_MASK, PPC403, { RT } },
3584{ "mfdmacc3", XSPR(31,323,220), XSPR_MASK, PPC403, { RT } },
3585{ "mfdmasr", XSPR(31,323,224), XSPR_MASK, PPC403, { RT } },
3586{ "mfdcr", X(31,323), X_MASK, PPC403 | BOOKE, { RT, SPR } },
3587
3588{ "div", XO(31,331,0,0), XO_MASK, M601, { RT, RA, RB } },
3589{ "div.", XO(31,331,0,1), XO_MASK, M601, { RT, RA, RB } },
3590{ "divo", XO(31,331,1,0), XO_MASK, M601, { RT, RA, RB } },
3591{ "divo.", XO(31,331,1,1), XO_MASK, M601, { RT, RA, RB } },
3592
3593{ "mfpmr", X(31,334), X_MASK, PPCPMR, { RT, PMR }},
3594
3595{ "mfmq", XSPR(31,339,0), XSPR_MASK, M601, { RT } },
3596{ "mfxer", XSPR(31,339,1), XSPR_MASK, COM, { RT } },
3597{ "mfrtcu", XSPR(31,339,4), XSPR_MASK, COM, { RT } },
3598{ "mfrtcl", XSPR(31,339,5), XSPR_MASK, COM, { RT } },
3599{ "mfdec", XSPR(31,339,6), XSPR_MASK, MFDEC1, { RT } },
3600{ "mfdec", XSPR(31,339,22), XSPR_MASK, MFDEC2, { RT } },
3601{ "mflr", XSPR(31,339,8), XSPR_MASK, COM, { RT } },
3602{ "mfctr", XSPR(31,339,9), XSPR_MASK, COM, { RT } },
3603{ "mftid", XSPR(31,339,17), XSPR_MASK, POWER, { RT } },
3604{ "mfdsisr", XSPR(31,339,18), XSPR_MASK, COM, { RT } },
3605{ "mfdar", XSPR(31,339,19), XSPR_MASK, COM, { RT } },
3606{ "mfsdr0", XSPR(31,339,24), XSPR_MASK, POWER, { RT } },
3607{ "mfsdr1", XSPR(31,339,25), XSPR_MASK, COM, { RT } },
3608{ "mfsrr0", XSPR(31,339,26), XSPR_MASK, COM, { RT } },
3609{ "mfsrr1", XSPR(31,339,27), XSPR_MASK, COM, { RT } },
3610{ "mfpid", XSPR(31,339,48), XSPR_MASK, BOOKE, { RT } },
3611{ "mfpid", XSPR(31,339,945), XSPR_MASK, PPC403, { RT } },
3612{ "mfcsrr0", XSPR(31,339,58), XSPR_MASK, BOOKE, { RT } },
3613{ "mfcsrr1", XSPR(31,339,59), XSPR_MASK, BOOKE, { RT } },
3614{ "mfdear", XSPR(31,339,61), XSPR_MASK, BOOKE, { RT } },
3615{ "mfdear", XSPR(31,339,981), XSPR_MASK, PPC403, { RT } },
3616{ "mfesr", XSPR(31,339,62), XSPR_MASK, BOOKE, { RT } },
3617{ "mfesr", XSPR(31,339,980), XSPR_MASK, PPC403, { RT } },
3618{ "mfivpr", XSPR(31,339,63), XSPR_MASK, BOOKE, { RT } },
3619{ "mfcmpa", XSPR(31,339,144), XSPR_MASK, PPC860, { RT } },
3620{ "mfcmpb", XSPR(31,339,145), XSPR_MASK, PPC860, { RT } },
3621{ "mfcmpc", XSPR(31,339,146), XSPR_MASK, PPC860, { RT } },
3622{ "mfcmpd", XSPR(31,339,147), XSPR_MASK, PPC860, { RT } },
3623{ "mficr", XSPR(31,339,148), XSPR_MASK, PPC860, { RT } },
3624{ "mfder", XSPR(31,339,149), XSPR_MASK, PPC860, { RT } },
3625{ "mfcounta", XSPR(31,339,150), XSPR_MASK, PPC860, { RT } },
3626{ "mfcountb", XSPR(31,339,151), XSPR_MASK, PPC860, { RT } },
3627{ "mfcmpe", XSPR(31,339,152), XSPR_MASK, PPC860, { RT } },
3628{ "mfcmpf", XSPR(31,339,153), XSPR_MASK, PPC860, { RT } },
3629{ "mfcmpg", XSPR(31,339,154), XSPR_MASK, PPC860, { RT } },
3630{ "mfcmph", XSPR(31,339,155), XSPR_MASK, PPC860, { RT } },
3631{ "mflctrl1", XSPR(31,339,156), XSPR_MASK, PPC860, { RT } },
3632{ "mflctrl2", XSPR(31,339,157), XSPR_MASK, PPC860, { RT } },
3633{ "mfictrl", XSPR(31,339,158), XSPR_MASK, PPC860, { RT } },
3634{ "mfbar", XSPR(31,339,159), XSPR_MASK, PPC860, { RT } },
3635{ "mfvrsave", XSPR(31,339,256), XSPR_MASK, PPCVEC, { RT } },
3636{ "mfusprg0", XSPR(31,339,256), XSPR_MASK, BOOKE, { RT } },
3637{ "mfsprg4", XSPR(31,339,260), XSPR_MASK, PPC405, { RT } },
3638{ "mfsprg5", XSPR(31,339,261), XSPR_MASK, PPC405, { RT } },
3639{ "mfsprg6", XSPR(31,339,262), XSPR_MASK, PPC405, { RT } },
3640{ "mfsprg7", XSPR(31,339,263), XSPR_MASK, PPC405, { RT } },
3641{ "mftb", X(31,371), X_MASK, CLASSIC, { RT, TBR } },
3642{ "mftb", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
3643{ "mftbl", XSPR(31,371,268), XSPR_MASK, CLASSIC, { RT } },
3644{ "mftbl", XSPR(31,339,268), XSPR_MASK, BOOKE, { RT } },
3645{ "mftbu", XSPR(31,371,269), XSPR_MASK, CLASSIC, { RT } },
3646{ "mftbu", XSPR(31,339,269), XSPR_MASK, BOOKE, { RT } },
3647{ "mfsprg", XSPR(31,339,272), XSPRG_MASK, PPC, { RT, SPRG } },
3648{ "mfsprg0", XSPR(31,339,272), XSPR_MASK, PPC, { RT } },
3649{ "mfsprg1", XSPR(31,339,273), XSPR_MASK, PPC, { RT } },
3650{ "mfsprg2", XSPR(31,339,274), XSPR_MASK, PPC, { RT } },
3651{ "mfsprg3", XSPR(31,339,275), XSPR_MASK, PPC, { RT } },
3652{ "mfasr", XSPR(31,339,280), XSPR_MASK, PPC64, { RT } },
3653{ "mfear", XSPR(31,339,282), XSPR_MASK, PPC, { RT } },
3654{ "mfpir", XSPR(31,339,286), XSPR_MASK, BOOKE, { RT } },
3655{ "mfpvr", XSPR(31,339,287), XSPR_MASK, PPC, { RT } },
3656{ "mfdbsr", XSPR(31,339,304), XSPR_MASK, BOOKE, { RT } },
3657{ "mfdbsr", XSPR(31,339,1008), XSPR_MASK, PPC403, { RT } },
3658{ "mfdbcr0", XSPR(31,339,308), XSPR_MASK, BOOKE, { RT } },
3659{ "mfdbcr0", XSPR(31,339,1010), XSPR_MASK, PPC405, { RT } },
3660{ "mfdbcr1", XSPR(31,339,309), XSPR_MASK, BOOKE, { RT } },
3661{ "mfdbcr1", XSPR(31,339,957), XSPR_MASK, PPC405, { RT } },
3662{ "mfdbcr2", XSPR(31,339,310), XSPR_MASK, BOOKE, { RT } },
3663{ "mfiac1", XSPR(31,339,312), XSPR_MASK, BOOKE, { RT } },
3664{ "mfiac1", XSPR(31,339,1012), XSPR_MASK, PPC403, { RT } },
3665{ "mfiac2", XSPR(31,339,313), XSPR_MASK, BOOKE, { RT } },
3666{ "mfiac2", XSPR(31,339,1013), XSPR_MASK, PPC403, { RT } },
3667{ "mfiac3", XSPR(31,339,314), XSPR_MASK, BOOKE, { RT } },
3668{ "mfiac3", XSPR(31,339,948), XSPR_MASK, PPC405, { RT } },
3669{ "mfiac4", XSPR(31,339,315), XSPR_MASK, BOOKE, { RT } },
3670{ "mfiac4", XSPR(31,339,949), XSPR_MASK, PPC405, { RT } },
3671{ "mfdac1", XSPR(31,339,316), XSPR_MASK, BOOKE, { RT } },
3672{ "mfdac1", XSPR(31,339,1014), XSPR_MASK, PPC403, { RT } },
3673{ "mfdac2", XSPR(31,339,317), XSPR_MASK, BOOKE, { RT } },
3674{ "mfdac2", XSPR(31,339,1015), XSPR_MASK, PPC403, { RT } },
3675{ "mfdvc1", XSPR(31,339,318), XSPR_MASK, BOOKE, { RT } },
3676{ "mfdvc1", XSPR(31,339,950), XSPR_MASK, PPC405, { RT } },
3677{ "mfdvc2", XSPR(31,339,319), XSPR_MASK, BOOKE, { RT } },
3678{ "mfdvc2", XSPR(31,339,951), XSPR_MASK, PPC405, { RT } },
3679{ "mftsr", XSPR(31,339,336), XSPR_MASK, BOOKE, { RT } },
3680{ "mftsr", XSPR(31,339,984), XSPR_MASK, PPC403, { RT } },
3681{ "mftcr", XSPR(31,339,340), XSPR_MASK, BOOKE, { RT } },
3682{ "mftcr", XSPR(31,339,986), XSPR_MASK, PPC403, { RT } },
3683{ "mfivor0", XSPR(31,339,400), XSPR_MASK, BOOKE, { RT } },
3684{ "mfivor1", XSPR(31,339,401), XSPR_MASK, BOOKE, { RT } },
3685{ "mfivor2", XSPR(31,339,402), XSPR_MASK, BOOKE, { RT } },
3686{ "mfivor3", XSPR(31,339,403), XSPR_MASK, BOOKE, { RT } },
3687{ "mfivor4", XSPR(31,339,404), XSPR_MASK, BOOKE, { RT } },
3688{ "mfivor5", XSPR(31,339,405), XSPR_MASK, BOOKE, { RT } },
3689{ "mfivor6", XSPR(31,339,406), XSPR_MASK, BOOKE, { RT } },
3690{ "mfivor7", XSPR(31,339,407), XSPR_MASK, BOOKE, { RT } },
3691{ "mfivor8", XSPR(31,339,408), XSPR_MASK, BOOKE, { RT } },
3692{ "mfivor9", XSPR(31,339,409), XSPR_MASK, BOOKE, { RT } },
3693{ "mfivor10", XSPR(31,339,410), XSPR_MASK, BOOKE, { RT } },
3694{ "mfivor11", XSPR(31,339,411), XSPR_MASK, BOOKE, { RT } },
3695{ "mfivor12", XSPR(31,339,412), XSPR_MASK, BOOKE, { RT } },
3696{ "mfivor13", XSPR(31,339,413), XSPR_MASK, BOOKE, { RT } },
3697{ "mfivor14", XSPR(31,339,414), XSPR_MASK, BOOKE, { RT } },
3698{ "mfivor15", XSPR(31,339,415), XSPR_MASK, BOOKE, { RT } },
3699{ "mfspefscr", XSPR(31,339,512), XSPR_MASK, PPCSPE, { RT } },
3700{ "mfbbear", XSPR(31,339,513), XSPR_MASK, PPCBRLK, { RT } },
3701{ "mfbbtar", XSPR(31,339,514), XSPR_MASK, PPCBRLK, { RT } },
3702{ "mfibatu", XSPR(31,339,528), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
3703{ "mfibatl", XSPR(31,339,529), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
3704{ "mfdbatu", XSPR(31,339,536), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
3705{ "mfdbatl", XSPR(31,339,537), XSPRBAT_MASK, PPC, { RT, SPRBAT } },
3706{ "mfic_cst", XSPR(31,339,560), XSPR_MASK, PPC860, { RT } },
3707{ "mfic_adr", XSPR(31,339,561), XSPR_MASK, PPC860, { RT } },
3708{ "mfic_dat", XSPR(31,339,562), XSPR_MASK, PPC860, { RT } },
3709{ "mfdc_cst", XSPR(31,339,568), XSPR_MASK, PPC860, { RT } },
3710{ "mfdc_adr", XSPR(31,339,569), XSPR_MASK, PPC860, { RT } },
3711{ "mfdc_dat", XSPR(31,339,570), XSPR_MASK, PPC860, { RT } },
3712{ "mfmcsrr0", XSPR(31,339,570), XSPR_MASK, PPCRFMCI, { RT } },
3713{ "mfmcsrr1", XSPR(31,339,571), XSPR_MASK, PPCRFMCI, { RT } },
3714{ "mfmcsr", XSPR(31,339,572), XSPR_MASK, PPCRFMCI, { RT } },
3715{ "mfdpdr", XSPR(31,339,630), XSPR_MASK, PPC860, { RT } },
3716{ "mfdpir", XSPR(31,339,631), XSPR_MASK, PPC860, { RT } },
3717{ "mfimmr", XSPR(31,339,638), XSPR_MASK, PPC860, { RT } },
3718{ "mfmi_ctr", XSPR(31,339,784), XSPR_MASK, PPC860, { RT } },
3719{ "mfmi_ap", XSPR(31,339,786), XSPR_MASK, PPC860, { RT } },
3720{ "mfmi_epn", XSPR(31,339,787), XSPR_MASK, PPC860, { RT } },
3721{ "mfmi_twc", XSPR(31,339,789), XSPR_MASK, PPC860, { RT } },
3722{ "mfmi_rpn", XSPR(31,339,790), XSPR_MASK, PPC860, { RT } },
3723{ "mfmd_ctr", XSPR(31,339,792), XSPR_MASK, PPC860, { RT } },
3724{ "mfm_casid", XSPR(31,339,793), XSPR_MASK, PPC860, { RT } },
3725{ "mfmd_ap", XSPR(31,339,794), XSPR_MASK, PPC860, { RT } },
3726{ "mfmd_epn", XSPR(31,339,795), XSPR_MASK, PPC860, { RT } },
3727{ "mfmd_twb", XSPR(31,339,796), XSPR_MASK, PPC860, { RT } },
3728{ "mfmd_twc", XSPR(31,339,797), XSPR_MASK, PPC860, { RT } },
3729{ "mfmd_rpn", XSPR(31,339,798), XSPR_MASK, PPC860, { RT } },
3730{ "mfm_tw", XSPR(31,339,799), XSPR_MASK, PPC860, { RT } },
3731{ "mfmi_dbcam", XSPR(31,339,816), XSPR_MASK, PPC860, { RT } },
3732{ "mfmi_dbram0",XSPR(31,339,817), XSPR_MASK, PPC860, { RT } },
3733{ "mfmi_dbram1",XSPR(31,339,818), XSPR_MASK, PPC860, { RT } },
3734{ "mfmd_dbcam", XSPR(31,339,824), XSPR_MASK, PPC860, { RT } },
3735{ "mfmd_dbram0",XSPR(31,339,825), XSPR_MASK, PPC860, { RT } },
3736{ "mfmd_dbram1",XSPR(31,339,826), XSPR_MASK, PPC860, { RT } },
3737{ "mfummcr0", XSPR(31,339,936), XSPR_MASK, PPC750, { RT } },
3738{ "mfupmc1", XSPR(31,339,937), XSPR_MASK, PPC750, { RT } },
3739{ "mfupmc2", XSPR(31,339,938), XSPR_MASK, PPC750, { RT } },
3740{ "mfusia", XSPR(31,339,939), XSPR_MASK, PPC750, { RT } },
3741{ "mfummcr1", XSPR(31,339,940), XSPR_MASK, PPC750, { RT } },
3742{ "mfupmc3", XSPR(31,339,941), XSPR_MASK, PPC750, { RT } },
3743{ "mfupmc4", XSPR(31,339,942), XSPR_MASK, PPC750, { RT } },
3744{ "mfzpr", XSPR(31,339,944), XSPR_MASK, PPC403, { RT } },
3745{ "mfccr0", XSPR(31,339,947), XSPR_MASK, PPC405, { RT } },
3746{ "mfmmcr0", XSPR(31,339,952), XSPR_MASK, PPC750, { RT } },
3747{ "mfpmc1", XSPR(31,339,953), XSPR_MASK, PPC750, { RT } },
3748{ "mfsgr", XSPR(31,339,953), XSPR_MASK, PPC403, { RT } },
3749{ "mfpmc2", XSPR(31,339,954), XSPR_MASK, PPC750, { RT } },
3750{ "mfdcwr", XSPR(31,339,954), XSPR_MASK, PPC403, { RT } },
3751{ "mfsia", XSPR(31,339,955), XSPR_MASK, PPC750, { RT } },
3752{ "mfsler", XSPR(31,339,955), XSPR_MASK, PPC405, { RT } },
3753{ "mfmmcr1", XSPR(31,339,956), XSPR_MASK, PPC750, { RT } },
3754{ "mfsu0r", XSPR(31,339,956), XSPR_MASK, PPC405, { RT } },
3755{ "mfpmc3", XSPR(31,339,957), XSPR_MASK, PPC750, { RT } },
3756{ "mfpmc4", XSPR(31,339,958), XSPR_MASK, PPC750, { RT } },
3757{ "mficdbdr", XSPR(31,339,979), XSPR_MASK, PPC403, { RT } },
3758{ "mfevpr", XSPR(31,339,982), XSPR_MASK, PPC403, { RT } },
3759{ "mfcdbcr", XSPR(31,339,983), XSPR_MASK, PPC403, { RT } },
3760{ "mfpit", XSPR(31,339,987), XSPR_MASK, PPC403, { RT } },
3761{ "mftbhi", XSPR(31,339,988), XSPR_MASK, PPC403, { RT } },
3762{ "mftblo", XSPR(31,339,989), XSPR_MASK, PPC403, { RT } },
3763{ "mfsrr2", XSPR(31,339,990), XSPR_MASK, PPC403, { RT } },
3764{ "mfsrr3", XSPR(31,339,991), XSPR_MASK, PPC403, { RT } },
3765{ "mfl2cr", XSPR(31,339,1017), XSPR_MASK, PPC750, { RT } },
3766{ "mfdccr", XSPR(31,339,1018), XSPR_MASK, PPC403, { RT } },
3767{ "mficcr", XSPR(31,339,1019), XSPR_MASK, PPC403, { RT } },
3768{ "mfictc", XSPR(31,339,1019), XSPR_MASK, PPC750, { RT } },
3769{ "mfpbl1", XSPR(31,339,1020), XSPR_MASK, PPC403, { RT } },
3770{ "mfthrm1", XSPR(31,339,1020), XSPR_MASK, PPC750, { RT } },
3771{ "mfpbu1", XSPR(31,339,1021), XSPR_MASK, PPC403, { RT } },
3772{ "mfthrm2", XSPR(31,339,1021), XSPR_MASK, PPC750, { RT } },
3773{ "mfpbl2", XSPR(31,339,1022), XSPR_MASK, PPC403, { RT } },
3774{ "mfthrm3", XSPR(31,339,1022), XSPR_MASK, PPC750, { RT } },
3775{ "mfpbu2", XSPR(31,339,1023), XSPR_MASK, PPC403, { RT } },
3776{ "mfspr", X(31,339), X_MASK, COM, { RT, SPR } },
3777
3778{ "lwax", X(31,341), X_MASK, PPC64, { RT, RA, RB } },
3779
3780{ "dst", XDSS(31,342,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
3781{ "dstt", XDSS(31,342,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
3782
3783{ "lhax", X(31,343), X_MASK, COM, { RT, RA, RB } },
3784
3785{ "lhaxe", X(31,351), X_MASK, BOOKE64, { RT, RA, RB } },
3786
3787{ "dstst", XDSS(31,374,0), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
3788{ "dststt", XDSS(31,374,1), XDSS_MASK, PPCVEC, { RA, RB, STRM } },
3789
3790{ "dccci", X(31,454), XRT_MASK, PPC403|PPC440, { RA, RB } },
3791
3792{ "abs", XO(31,360,0,0), XORB_MASK, M601, { RT, RA } },
3793{ "abs.", XO(31,360,0,1), XORB_MASK, M601, { RT, RA } },
3794{ "abso", XO(31,360,1,0), XORB_MASK, M601, { RT, RA } },
3795{ "abso.", XO(31,360,1,1), XORB_MASK, M601, { RT, RA } },
3796
3797{ "divs", XO(31,363,0,0), XO_MASK, M601, { RT, RA, RB } },
3798{ "divs.", XO(31,363,0,1), XO_MASK, M601, { RT, RA, RB } },
3799{ "divso", XO(31,363,1,0), XO_MASK, M601, { RT, RA, RB } },
3800{ "divso.", XO(31,363,1,1), XO_MASK, M601, { RT, RA, RB } },
3801
3802{ "tlbia", X(31,370), 0xffffffff, PPC, { 0 } },
3803
3804{ "lwaux", X(31,373), X_MASK, PPC64, { RT, RAL, RB } },
3805
3806{ "lhaux", X(31,375), X_MASK, COM, { RT, RAL, RB } },
3807
3808{ "lhauxe", X(31,383), X_MASK, BOOKE64, { RT, RAL, RB } },
3809
3810{ "mtdcrx", X(31,387), X_MASK, BOOKE, { RA, RS } },
3811
3812{ "dcblc", X(31,390), X_MASK, PPCCHLK, { CT, RA, RB }},
3813
3814{ "subfe64", XO(31,392,0,0), XO_MASK, BOOKE64, { RT, RA, RB } },
3815{ "subfe64o",XO(31,392,1,0), XO_MASK, BOOKE64, { RT, RA, RB } },
3816
3817{ "adde64", XO(31,394,0,0), XO_MASK, BOOKE64, { RT, RA, RB } },
3818{ "adde64o", XO(31,394,1,0), XO_MASK, BOOKE64, { RT, RA, RB } },
3819
3820{ "dcblce", X(31,398), X_MASK, PPCCHLK64, { CT, RA, RB }},
3821
3822{ "slbmte", X(31,402), XRA_MASK, PPC64, { RS, RB } },
3823
3824{ "sthx", X(31,407), X_MASK, COM, { RS, RA, RB } },
3825
3826{ "lfqx", X(31,791), X_MASK, POWER2, { FRT, RA, RB } },
3827
3828{ "lfqux", X(31,823), X_MASK, POWER2, { FRT, RA, RB } },
3829
3830{ "stfqx", X(31,919), X_MASK, POWER2, { FRS, RA, RB } },
3831
3832{ "stfqux", X(31,951), X_MASK, POWER2, { FRS, RA, RB } },
3833
3834{ "orc", XRC(31,412,0), X_MASK, COM, { RA, RS, RB } },
3835{ "orc.", XRC(31,412,1), X_MASK, COM, { RA, RS, RB } },
3836
3837{ "sradi", XS(31,413,0), XS_MASK, PPC64, { RA, RS, SH6 } },
3838{ "sradi.", XS(31,413,1), XS_MASK, PPC64, { RA, RS, SH6 } },
3839
3840{ "sthxe", X(31,415), X_MASK, BOOKE64, { RS, RA, RB } },
3841
3842{ "slbie", X(31,434), XRTRA_MASK, PPC64, { RB } },
3843
3844{ "ecowx", X(31,438), X_MASK, PPC, { RT, RA, RB } },
3845
3846{ "sthux", X(31,439), X_MASK, COM, { RS, RAS, RB } },
3847
3848{ "sthuxe", X(31,447), X_MASK, BOOKE64, { RS, RAS, RB } },
3849
3850{ "mr", XRC(31,444,0), X_MASK, COM, { RA, RS, RBS } },
3851{ "or", XRC(31,444,0), X_MASK, COM, { RA, RS, RB } },
3852{ "mr.", XRC(31,444,1), X_MASK, COM, { RA, RS, RBS } },
3853{ "or.", XRC(31,444,1), X_MASK, COM, { RA, RS, RB } },
3854
3855{ "mtexisr", XSPR(31,451,64), XSPR_MASK, PPC403, { RS } },
3856{ "mtexier", XSPR(31,451,66), XSPR_MASK, PPC403, { RS } },
3857{ "mtbr0", XSPR(31,451,128), XSPR_MASK, PPC403, { RS } },
3858{ "mtbr1", XSPR(31,451,129), XSPR_MASK, PPC403, { RS } },
3859{ "mtbr2", XSPR(31,451,130), XSPR_MASK, PPC403, { RS } },
3860{ "mtbr3", XSPR(31,451,131), XSPR_MASK, PPC403, { RS } },
3861{ "mtbr4", XSPR(31,451,132), XSPR_MASK, PPC403, { RS } },
3862{ "mtbr5", XSPR(31,451,133), XSPR_MASK, PPC403, { RS } },
3863{ "mtbr6", XSPR(31,451,134), XSPR_MASK, PPC403, { RS } },
3864{ "mtbr7", XSPR(31,451,135), XSPR_MASK, PPC403, { RS } },
3865{ "mtbear", XSPR(31,451,144), XSPR_MASK, PPC403, { RS } },
3866{ "mtbesr", XSPR(31,451,145), XSPR_MASK, PPC403, { RS } },
3867{ "mtiocr", XSPR(31,451,160), XSPR_MASK, PPC403, { RS } },
3868{ "mtdmacr0", XSPR(31,451,192), XSPR_MASK, PPC403, { RS } },
3869{ "mtdmact0", XSPR(31,451,193), XSPR_MASK, PPC403, { RS } },
3870{ "mtdmada0", XSPR(31,451,194), XSPR_MASK, PPC403, { RS } },
3871{ "mtdmasa0", XSPR(31,451,195), XSPR_MASK, PPC403, { RS } },
3872{ "mtdmacc0", XSPR(31,451,196), XSPR_MASK, PPC403, { RS } },
3873{ "mtdmacr1", XSPR(31,451,200), XSPR_MASK, PPC403, { RS } },
3874{ "mtdmact1", XSPR(31,451,201), XSPR_MASK, PPC403, { RS } },
3875{ "mtdmada1", XSPR(31,451,202), XSPR_MASK, PPC403, { RS } },
3876{ "mtdmasa1", XSPR(31,451,203), XSPR_MASK, PPC403, { RS } },
3877{ "mtdmacc1", XSPR(31,451,204), XSPR_MASK, PPC403, { RS } },
3878{ "mtdmacr2", XSPR(31,451,208), XSPR_MASK, PPC403, { RS } },
3879{ "mtdmact2", XSPR(31,451,209), XSPR_MASK, PPC403, { RS } },
3880{ "mtdmada2", XSPR(31,451,210), XSPR_MASK, PPC403, { RS } },
3881{ "mtdmasa2", XSPR(31,451,211), XSPR_MASK, PPC403, { RS } },
3882{ "mtdmacc2", XSPR(31,451,212), XSPR_MASK, PPC403, { RS } },
3883{ "mtdmacr3", XSPR(31,451,216), XSPR_MASK, PPC403, { RS } },
3884{ "mtdmact3", XSPR(31,451,217), XSPR_MASK, PPC403, { RS } },
3885{ "mtdmada3", XSPR(31,451,218), XSPR_MASK, PPC403, { RS } },
3886{ "mtdmasa3", XSPR(31,451,219), XSPR_MASK, PPC403, { RS } },
3887{ "mtdmacc3", XSPR(31,451,220), XSPR_MASK, PPC403, { RS } },
3888{ "mtdmasr", XSPR(31,451,224), XSPR_MASK, PPC403, { RS } },
3889{ "mtdcr", X(31,451), X_MASK, PPC403 | BOOKE, { SPR, RS } },
3890
3891{ "subfze64",XO(31,456,0,0), XORB_MASK, BOOKE64, { RT, RA } },
3892{ "subfze64o",XO(31,456,1,0), XORB_MASK, BOOKE64, { RT, RA } },
3893
3894{ "divdu", XO(31,457,0,0), XO_MASK, PPC64, { RT, RA, RB } },
3895{ "divdu.", XO(31,457,0,1), XO_MASK, PPC64, { RT, RA, RB } },
3896{ "divduo", XO(31,457,1,0), XO_MASK, PPC64, { RT, RA, RB } },
3897{ "divduo.", XO(31,457,1,1), XO_MASK, PPC64, { RT, RA, RB } },
3898
3899{ "addze64", XO(31,458,0,0), XORB_MASK, BOOKE64, { RT, RA } },
3900{ "addze64o",XO(31,458,1,0), XORB_MASK, BOOKE64, { RT, RA } },
3901
3902{ "divwu", XO(31,459,0,0), XO_MASK, PPC, { RT, RA, RB } },
3903{ "divwu.", XO(31,459,0,1), XO_MASK, PPC, { RT, RA, RB } },
3904{ "divwuo", XO(31,459,1,0), XO_MASK, PPC, { RT, RA, RB } },
3905{ "divwuo.", XO(31,459,1,1), XO_MASK, PPC, { RT, RA, RB } },
3906
3907{ "mtmq", XSPR(31,467,0), XSPR_MASK, M601, { RS } },
3908{ "mtxer", XSPR(31,467,1), XSPR_MASK, COM, { RS } },
3909{ "mtlr", XSPR(31,467,8), XSPR_MASK, COM, { RS } },
3910{ "mtctr", XSPR(31,467,9), XSPR_MASK, COM, { RS } },
3911{ "mttid", XSPR(31,467,17), XSPR_MASK, POWER, { RS } },
3912{ "mtdsisr", XSPR(31,467,18), XSPR_MASK, COM, { RS } },
3913{ "mtdar", XSPR(31,467,19), XSPR_MASK, COM, { RS } },
3914{ "mtrtcu", XSPR(31,467,20), XSPR_MASK, COM, { RS } },
3915{ "mtrtcl", XSPR(31,467,21), XSPR_MASK, COM, { RS } },
3916{ "mtdec", XSPR(31,467,22), XSPR_MASK, COM, { RS } },
3917{ "mtsdr0", XSPR(31,467,24), XSPR_MASK, POWER, { RS } },
3918{ "mtsdr1", XSPR(31,467,25), XSPR_MASK, COM, { RS } },
3919{ "mtsrr0", XSPR(31,467,26), XSPR_MASK, COM, { RS } },
3920{ "mtsrr1", XSPR(31,467,27), XSPR_MASK, COM, { RS } },
3921{ "mtpid", XSPR(31,467,48), XSPR_MASK, BOOKE, { RS } },
3922{ "mtpid", XSPR(31,467,945), XSPR_MASK, PPC403, { RS } },
3923{ "mtdecar", XSPR(31,467,54), XSPR_MASK, BOOKE, { RS } },
3924{ "mtcsrr0", XSPR(31,467,58), XSPR_MASK, BOOKE, { RS } },
3925{ "mtcsrr1", XSPR(31,467,59), XSPR_MASK, BOOKE, { RS } },
3926{ "mtdear", XSPR(31,467,61), XSPR_MASK, BOOKE, { RS } },
3927{ "mtdear", XSPR(31,467,981), XSPR_MASK, PPC403, { RS } },
3928{ "mtesr", XSPR(31,467,62), XSPR_MASK, BOOKE, { RS } },
3929{ "mtesr", XSPR(31,467,980), XSPR_MASK, PPC403, { RS } },
3930{ "mtivpr", XSPR(31,467,63), XSPR_MASK, BOOKE, { RS } },
3931{ "mtcmpa", XSPR(31,467,144), XSPR_MASK, PPC860, { RS } },
3932{ "mtcmpb", XSPR(31,467,145), XSPR_MASK, PPC860, { RS } },
3933{ "mtcmpc", XSPR(31,467,146), XSPR_MASK, PPC860, { RS } },
3934{ "mtcmpd", XSPR(31,467,147), XSPR_MASK, PPC860, { RS } },
3935{ "mticr", XSPR(31,467,148), XSPR_MASK, PPC860, { RS } },
3936{ "mtder", XSPR(31,467,149), XSPR_MASK, PPC860, { RS } },
3937{ "mtcounta", XSPR(31,467,150), XSPR_MASK, PPC860, { RS } },
3938{ "mtcountb", XSPR(31,467,151), XSPR_MASK, PPC860, { RS } },
3939{ "mtcmpe", XSPR(31,467,152), XSPR_MASK, PPC860, { RS } },
3940{ "mtcmpf", XSPR(31,467,153), XSPR_MASK, PPC860, { RS } },
3941{ "mtcmpg", XSPR(31,467,154), XSPR_MASK, PPC860, { RS } },
3942{ "mtcmph", XSPR(31,467,155), XSPR_MASK, PPC860, { RS } },
3943{ "mtlctrl1", XSPR(31,467,156), XSPR_MASK, PPC860, { RS } },
3944{ "mtlctrl2", XSPR(31,467,157), XSPR_MASK, PPC860, { RS } },
3945{ "mtictrl", XSPR(31,467,158), XSPR_MASK, PPC860, { RS } },
3946{ "mtbar", XSPR(31,467,159), XSPR_MASK, PPC860, { RS } },
3947{ "mtvrsave", XSPR(31,467,256), XSPR_MASK, PPCVEC, { RS } },
3948{ "mtusprg0", XSPR(31,467,256), XSPR_MASK, BOOKE, { RS } },
3949{ "mtsprg", XSPR(31,467,272), XSPRG_MASK,PPC, { SPRG, RS } },
3950{ "mtsprg0", XSPR(31,467,272), XSPR_MASK, PPC, { RS } },
3951{ "mtsprg1", XSPR(31,467,273), XSPR_MASK, PPC, { RS } },
3952{ "mtsprg2", XSPR(31,467,274), XSPR_MASK, PPC, { RS } },
3953{ "mtsprg3", XSPR(31,467,275), XSPR_MASK, PPC, { RS } },
3954{ "mtsprg4", XSPR(31,467,276), XSPR_MASK, PPC405 | BOOKE, { RS } },
3955{ "mtsprg5", XSPR(31,467,277), XSPR_MASK, PPC405 | BOOKE, { RS } },
3956{ "mtsprg6", XSPR(31,467,278), XSPR_MASK, PPC405 | BOOKE, { RS } },
3957{ "mtsprg7", XSPR(31,467,279), XSPR_MASK, PPC405 | BOOKE, { RS } },
3958{ "mtasr", XSPR(31,467,280), XSPR_MASK, PPC64, { RS } },
3959{ "mtear", XSPR(31,467,282), XSPR_MASK, PPC, { RS } },
3960{ "mttbl", XSPR(31,467,284), XSPR_MASK, PPC, { RS } },
3961{ "mttbu", XSPR(31,467,285), XSPR_MASK, PPC, { RS } },
3962{ "mtdbsr", XSPR(31,467,304), XSPR_MASK, BOOKE, { RS } },
3963{ "mtdbsr", XSPR(31,467,1008), XSPR_MASK, PPC403, { RS } },
3964{ "mtdbcr0", XSPR(31,467,308), XSPR_MASK, BOOKE, { RS } },
3965{ "mtdbcr0", XSPR(31,467,1010), XSPR_MASK, PPC405, { RS } },
3966{ "mtdbcr1", XSPR(31,467,309), XSPR_MASK, BOOKE, { RS } },
3967{ "mtdbcr1", XSPR(31,467,957), XSPR_MASK, PPC405, { RS } },
3968{ "mtdbcr2", XSPR(31,467,310), XSPR_MASK, BOOKE, { RS } },
3969{ "mtiac1", XSPR(31,467,312), XSPR_MASK, BOOKE, { RS } },
3970{ "mtiac1", XSPR(31,467,1012), XSPR_MASK, PPC403, { RS } },
3971{ "mtiac2", XSPR(31,467,313), XSPR_MASK, BOOKE, { RS } },
3972{ "mtiac2", XSPR(31,467,1013), XSPR_MASK, PPC403, { RS } },
3973{ "mtiac3", XSPR(31,467,314), XSPR_MASK, BOOKE, { RS } },
3974{ "mtiac3", XSPR(31,467,948), XSPR_MASK, PPC405, { RS } },
3975{ "mtiac4", XSPR(31,467,315), XSPR_MASK, BOOKE, { RS } },
3976{ "mtiac4", XSPR(31,467,949), XSPR_MASK, PPC405, { RS } },
3977{ "mtdac1", XSPR(31,467,316), XSPR_MASK, BOOKE, { RS } },
3978{ "mtdac1", XSPR(31,467,1014), XSPR_MASK, PPC403, { RS } },
3979{ "mtdac2", XSPR(31,467,317), XSPR_MASK, BOOKE, { RS } },
3980{ "mtdac2", XSPR(31,467,1015), XSPR_MASK, PPC403, { RS } },
3981{ "mtdvc1", XSPR(31,467,318), XSPR_MASK, BOOKE, { RS } },
3982{ "mtdvc1", XSPR(31,467,950), XSPR_MASK, PPC405, { RS } },
3983{ "mtdvc2", XSPR(31,467,319), XSPR_MASK, BOOKE, { RS } },
3984{ "mtdvc2", XSPR(31,467,951), XSPR_MASK, PPC405, { RS } },
3985{ "mttsr", XSPR(31,467,336), XSPR_MASK, BOOKE, { RS } },
3986{ "mttsr", XSPR(31,467,984), XSPR_MASK, PPC403, { RS } },
3987{ "mttcr", XSPR(31,467,340), XSPR_MASK, BOOKE, { RS } },
3988{ "mttcr", XSPR(31,467,986), XSPR_MASK, PPC403, { RS } },
3989{ "mtivor0", XSPR(31,467,400), XSPR_MASK, BOOKE, { RS } },
3990{ "mtivor1", XSPR(31,467,401), XSPR_MASK, BOOKE, { RS } },
3991{ "mtivor2", XSPR(31,467,402), XSPR_MASK, BOOKE, { RS } },
3992{ "mtivor3", XSPR(31,467,403), XSPR_MASK, BOOKE, { RS } },
3993{ "mtivor4", XSPR(31,467,404), XSPR_MASK, BOOKE, { RS } },
3994{ "mtivor5", XSPR(31,467,405), XSPR_MASK, BOOKE, { RS } },
3995{ "mtivor6", XSPR(31,467,406), XSPR_MASK, BOOKE, { RS } },
3996{ "mtivor7", XSPR(31,467,407), XSPR_MASK, BOOKE, { RS } },
3997{ "mtivor8", XSPR(31,467,408), XSPR_MASK, BOOKE, { RS } },
3998{ "mtivor9", XSPR(31,467,409), XSPR_MASK, BOOKE, { RS } },
3999{ "mtivor10", XSPR(31,467,410), XSPR_MASK, BOOKE, { RS } },
4000{ "mtivor11", XSPR(31,467,411), XSPR_MASK, BOOKE, { RS } },
4001{ "mtivor12", XSPR(31,467,412), XSPR_MASK, BOOKE, { RS } },
4002{ "mtivor13", XSPR(31,467,413), XSPR_MASK, BOOKE, { RS } },
4003{ "mtivor14", XSPR(31,467,414), XSPR_MASK, BOOKE, { RS } },
4004{ "mtivor15", XSPR(31,467,415), XSPR_MASK, BOOKE, { RS } },
4005{ "mtspefscr", XSPR(31,467,512), XSPR_MASK, PPCSPE, { RS } },
4006{ "mtbbear", XSPR(31,467,513), XSPR_MASK, PPCBRLK, { RS } },
4007{ "mtbbtar", XSPR(31,467,514), XSPR_MASK, PPCBRLK, { RS } },
4008{ "mtibatu", XSPR(31,467,528), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
4009{ "mtibatl", XSPR(31,467,529), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
4010{ "mtdbatu", XSPR(31,467,536), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
4011{ "mtdbatl", XSPR(31,467,537), XSPRBAT_MASK, PPC, { SPRBAT, RS } },
4012{ "mtmcsrr0", XSPR(31,467,570), XSPR_MASK, PPCRFMCI, { RS } },
4013{ "mtmcsrr1", XSPR(31,467,571), XSPR_MASK, PPCRFMCI, { RS } },
4014{ "mtmcsr", XSPR(31,467,572), XSPR_MASK, PPCRFMCI, { RS } },
4015{ "mtummcr0", XSPR(31,467,936), XSPR_MASK, PPC750, { RS } },
4016{ "mtupmc1", XSPR(31,467,937), XSPR_MASK, PPC750, { RS } },
4017{ "mtupmc2", XSPR(31,467,938), XSPR_MASK, PPC750, { RS } },
4018{ "mtusia", XSPR(31,467,939), XSPR_MASK, PPC750, { RS } },
4019{ "mtummcr1", XSPR(31,467,940), XSPR_MASK, PPC750, { RS } },
4020{ "mtupmc3", XSPR(31,467,941), XSPR_MASK, PPC750, { RS } },
4021{ "mtupmc4", XSPR(31,467,942), XSPR_MASK, PPC750, { RS } },
4022{ "mtzpr", XSPR(31,467,944), XSPR_MASK, PPC403, { RS } },
4023{ "mtccr0", XSPR(31,467,947), XSPR_MASK, PPC405, { RS } },
4024{ "mtmmcr0", XSPR(31,467,952), XSPR_MASK, PPC750, { RS } },
4025{ "mtsgr", XSPR(31,467,953), XSPR_MASK, PPC403, { RS } },
4026{ "mtpmc1", XSPR(31,467,953), XSPR_MASK, PPC750, { RS } },
4027{ "mtdcwr", XSPR(31,467,954), XSPR_MASK, PPC403, { RS } },
4028{ "mtpmc2", XSPR(31,467,954), XSPR_MASK, PPC750, { RS } },
4029{ "mtsler", XSPR(31,467,955), XSPR_MASK, PPC405, { RS } },
4030{ "mtsia", XSPR(31,467,955), XSPR_MASK, PPC750, { RS } },
4031{ "mtsu0r", XSPR(31,467,956), XSPR_MASK, PPC405, { RS } },
4032{ "mtmmcr1", XSPR(31,467,956), XSPR_MASK, PPC750, { RS } },
4033{ "mtpmc3", XSPR(31,467,957), XSPR_MASK, PPC750, { RS } },
4034{ "mtpmc4", XSPR(31,467,958), XSPR_MASK, PPC750, { RS } },
4035{ "mticdbdr", XSPR(31,467,979), XSPR_MASK, PPC403, { RS } },
4036{ "mtevpr", XSPR(31,467,982), XSPR_MASK, PPC403, { RS } },
4037{ "mtcdbcr", XSPR(31,467,983), XSPR_MASK, PPC403, { RS } },
4038{ "mtpit", XSPR(31,467,987), XSPR_MASK, PPC403, { RS } },
4039{ "mttbhi", XSPR(31,467,988), XSPR_MASK, PPC403, { RS } },
4040{ "mttblo", XSPR(31,467,989), XSPR_MASK, PPC403, { RS } },
4041{ "mtsrr2", XSPR(31,467,990), XSPR_MASK, PPC403, { RS } },
4042{ "mtsrr3", XSPR(31,467,991), XSPR_MASK, PPC403, { RS } },
4043{ "mtl2cr", XSPR(31,467,1017), XSPR_MASK, PPC750, { RS } },
4044{ "mtdccr", XSPR(31,467,1018), XSPR_MASK, PPC403, { RS } },
4045{ "mticcr", XSPR(31,467,1019), XSPR_MASK, PPC403, { RS } },
4046{ "mtictc", XSPR(31,467,1019), XSPR_MASK, PPC750, { RS } },
4047{ "mtpbl1", XSPR(31,467,1020), XSPR_MASK, PPC403, { RS } },
4048{ "mtthrm1", XSPR(31,467,1020), XSPR_MASK, PPC750, { RS } },
4049{ "mtpbu1", XSPR(31,467,1021), XSPR_MASK, PPC403, { RS } },
4050{ "mtthrm2", XSPR(31,467,1021), XSPR_MASK, PPC750, { RS } },
4051{ "mtpbl2", XSPR(31,467,1022), XSPR_MASK, PPC403, { RS } },
4052{ "mtthrm3", XSPR(31,467,1022), XSPR_MASK, PPC750, { RS } },
4053{ "mtpbu2", XSPR(31,467,1023), XSPR_MASK, PPC403, { RS } },
4054{ "mtspr", X(31,467), X_MASK, COM, { SPR, RS } },
4055
4056{ "dcbi", X(31,470), XRT_MASK, PPC, { RA, RB } },
4057
4058{ "nand", XRC(31,476,0), X_MASK, COM, { RA, RS, RB } },
4059{ "nand.", XRC(31,476,1), X_MASK, COM, { RA, RS, RB } },
4060
4061{ "dcbie", X(31,478), XRT_MASK, BOOKE64, { RA, RB } },
4062
4063{ "dcread", X(31,486), X_MASK, PPC403|PPC440, { RT, RA, RB }},
4064
4065{ "mtpmr", X(31,462), X_MASK, PPCPMR, { PMR, RS }},
4066
4067{ "icbtls", X(31,486), X_MASK, PPCCHLK, { CT, RA, RB }},
4068
4069{ "nabs", XO(31,488,0,0), XORB_MASK, M601, { RT, RA } },
4070{ "subfme64",XO(31,488,0,0), XORB_MASK, BOOKE64, { RT, RA } },
4071{ "nabs.", XO(31,488,0,1), XORB_MASK, M601, { RT, RA } },
4072{ "nabso", XO(31,488,1,0), XORB_MASK, M601, { RT, RA } },
4073{ "subfme64o",XO(31,488,1,0), XORB_MASK, BOOKE64, { RT, RA } },
4074{ "nabso.", XO(31,488,1,1), XORB_MASK, M601, { RT, RA } },
4075
4076{ "divd", XO(31,489,0,0), XO_MASK, PPC64, { RT, RA, RB } },
4077{ "divd.", XO(31,489,0,1), XO_MASK, PPC64, { RT, RA, RB } },
4078{ "divdo", XO(31,489,1,0), XO_MASK, PPC64, { RT, RA, RB } },
4079{ "divdo.", XO(31,489,1,1), XO_MASK, PPC64, { RT, RA, RB } },
4080
4081{ "addme64", XO(31,490,0,0), XORB_MASK, BOOKE64, { RT, RA } },
4082{ "addme64o",XO(31,490,1,0), XORB_MASK, BOOKE64, { RT, RA } },
4083
4084{ "divw", XO(31,491,0,0), XO_MASK, PPC, { RT, RA, RB } },
4085{ "divw.", XO(31,491,0,1), XO_MASK, PPC, { RT, RA, RB } },
4086{ "divwo", XO(31,491,1,0), XO_MASK, PPC, { RT, RA, RB } },
4087{ "divwo.", XO(31,491,1,1), XO_MASK, PPC, { RT, RA, RB } },
4088
4089{ "icbtlse", X(31,494), X_MASK, PPCCHLK64, { CT, RA, RB }},
4090
4091{ "slbia", X(31,498), 0xffffffff, PPC64, { 0 } },
4092
4093{ "cli", X(31,502), XRB_MASK, POWER, { RT, RA } },
4094
4095{ "stdcxe.", XRC(31,511,1), X_MASK, BOOKE64, { RS, RA, RB } },
4096
4097{ "mcrxr", X(31,512), XRARB_MASK|(3<<21), COM, { BF } },
4098
4099{ "bblels", X(31,518), X_MASK, PPCBRLK, { 0 }},
4100{ "mcrxr64", X(31,544), XRARB_MASK|(3<<21), BOOKE64, { BF } },
4101
4102{ "clcs", X(31,531), XRB_MASK, M601, { RT, RA } },
4103
4104{ "lswx", X(31,533), X_MASK, PPCCOM, { RT, RA, RB } },
4105{ "lsx", X(31,533), X_MASK, PWRCOM, { RT, RA, RB } },
4106
4107{ "lwbrx", X(31,534), X_MASK, PPCCOM, { RT, RA, RB } },
4108{ "lbrx", X(31,534), X_MASK, PWRCOM, { RT, RA, RB } },
4109
4110{ "lfsx", X(31,535), X_MASK, COM, { FRT, RA, RB } },
4111
4112{ "srw", XRC(31,536,0), X_MASK, PPCCOM, { RA, RS, RB } },
4113{ "sr", XRC(31,536,0), X_MASK, PWRCOM, { RA, RS, RB } },
4114{ "srw.", XRC(31,536,1), X_MASK, PPCCOM, { RA, RS, RB } },
4115{ "sr.", XRC(31,536,1), X_MASK, PWRCOM, { RA, RS, RB } },
4116
4117{ "rrib", XRC(31,537,0), X_MASK, M601, { RA, RS, RB } },
4118{ "rrib.", XRC(31,537,1), X_MASK, M601, { RA, RS, RB } },
4119
4120{ "srd", XRC(31,539,0), X_MASK, PPC64, { RA, RS, RB } },
4121{ "srd.", XRC(31,539,1), X_MASK, PPC64, { RA, RS, RB } },
4122
4123{ "maskir", XRC(31,541,0), X_MASK, M601, { RA, RS, RB } },
4124{ "maskir.", XRC(31,541,1), X_MASK, M601, { RA, RS, RB } },
4125
4126{ "lwbrxe", X(31,542), X_MASK, BOOKE64, { RT, RA, RB } },
4127
4128{ "lfsxe", X(31,543), X_MASK, BOOKE64, { FRT, RA, RB } },
4129
4130{ "bbelr", X(31,550), X_MASK, PPCBRLK, { 0 }},
4131{ "tlbsync", X(31,566), 0xffffffff, PPC, { 0 } },
4132
4133{ "lfsux", X(31,567), X_MASK, COM, { FRT, RAS, RB } },
4134
4135{ "lfsuxe", X(31,575), X_MASK, BOOKE64, { FRT, RAS, RB } },
4136
4137{ "mfsr", X(31,595), XRB_MASK|(1<<20), COM32, { RT, SR } },
4138
4139{ "lswi", X(31,597), X_MASK, PPCCOM, { RT, RA, NB } },
4140{ "lsi", X(31,597), X_MASK, PWRCOM, { RT, RA, NB } },
4141
4142{ "lwsync", XSYNC(31,598,1), 0xffffffff, PPC, { 0 } },
4143{ "ptesync", XSYNC(31,598,2), 0xffffffff, PPC64, { 0 } },
4144{ "msync", X(31,598), 0xffffffff, BOOKE, { 0 } },
4145{ "sync", X(31,598), XSYNC_MASK, PPCCOM, { LS } },
4146{ "dcs", X(31,598), 0xffffffff, PWRCOM, { 0 } },
4147
4148{ "lfdx", X(31,599), X_MASK, COM, { FRT, RA, RB } },
4149
4150{ "lfdxe", X(31,607), X_MASK, BOOKE64, { FRT, RA, RB } },
4151
4152{ "mfsri", X(31,627), X_MASK, PWRCOM, { RT, RA, RB } },
4153
4154{ "dclst", X(31,630), XRB_MASK, PWRCOM, { RS, RA } },
4155
4156{ "lfdux", X(31,631), X_MASK, COM, { FRT, RAS, RB } },
4157
4158{ "lfduxe", X(31,639), X_MASK, BOOKE64, { FRT, RAS, RB } },
4159
4160{ "mfsrin", X(31,659), XRA_MASK, PPC32, { RT, RB } },
4161
4162{ "stswx", X(31,661), X_MASK, PPCCOM, { RS, RA, RB } },
4163{ "stsx", X(31,661), X_MASK, PWRCOM, { RS, RA, RB } },
4164
4165{ "stwbrx", X(31,662), X_MASK, PPCCOM, { RS, RA, RB } },
4166{ "stbrx", X(31,662), X_MASK, PWRCOM, { RS, RA, RB } },
4167
4168{ "stfsx", X(31,663), X_MASK, COM, { FRS, RA, RB } },
4169
4170{ "srq", XRC(31,664,0), X_MASK, M601, { RA, RS, RB } },
4171{ "srq.", XRC(31,664,1), X_MASK, M601, { RA, RS, RB } },
4172
4173{ "sre", XRC(31,665,0), X_MASK, M601, { RA, RS, RB } },
4174{ "sre.", XRC(31,665,1), X_MASK, M601, { RA, RS, RB } },
4175
4176{ "stwbrxe", X(31,670), X_MASK, BOOKE64, { RS, RA, RB } },
4177
4178{ "stfsxe", X(31,671), X_MASK, BOOKE64, { FRS, RA, RB } },
4179
4180{ "stfsux", X(31,695), X_MASK, COM, { FRS, RAS, RB } },
4181
4182{ "sriq", XRC(31,696,0), X_MASK, M601, { RA, RS, SH } },
4183{ "sriq.", XRC(31,696,1), X_MASK, M601, { RA, RS, SH } },
4184
4185{ "stfsuxe", X(31,703), X_MASK, BOOKE64, { FRS, RAS, RB } },
4186
4187{ "stswi", X(31,725), X_MASK, PPCCOM, { RS, RA, NB } },
4188{ "stsi", X(31,725), X_MASK, PWRCOM, { RS, RA, NB } },
4189
4190{ "stfdx", X(31,727), X_MASK, COM, { FRS, RA, RB } },
4191
4192{ "srlq", XRC(31,728,0), X_MASK, M601, { RA, RS, RB } },
4193{ "srlq.", XRC(31,728,1), X_MASK, M601, { RA, RS, RB } },
4194
4195{ "sreq", XRC(31,729,0), X_MASK, M601, { RA, RS, RB } },
4196{ "sreq.", XRC(31,729,1), X_MASK, M601, { RA, RS, RB } },
4197
4198{ "stfdxe", X(31,735), X_MASK, BOOKE64, { FRS, RA, RB } },
4199
4200{ "dcba", X(31,758), XRT_MASK, PPC405 | BOOKE, { RA, RB } },
4201
4202{ "stfdux", X(31,759), X_MASK, COM, { FRS, RAS, RB } },
4203
4204{ "srliq", XRC(31,760,0), X_MASK, M601, { RA, RS, SH } },
4205{ "srliq.", XRC(31,760,1), X_MASK, M601, { RA, RS, SH } },
4206
4207{ "dcbae", X(31,766), XRT_MASK, BOOKE64, { RA, RB } },
4208
4209{ "stfduxe", X(31,767), X_MASK, BOOKE64, { FRS, RAS, RB } },
4210
4211{ "tlbivax", X(31,786), XRT_MASK, BOOKE, { RA, RB } },
4212{ "tlbivaxe",X(31,787), XRT_MASK, BOOKE64, { RA, RB } },
4213
4214{ "lhbrx", X(31,790), X_MASK, COM, { RT, RA, RB } },
4215
4216{ "sraw", XRC(31,792,0), X_MASK, PPCCOM, { RA, RS, RB } },
4217{ "sra", XRC(31,792,0), X_MASK, PWRCOM, { RA, RS, RB } },
4218{ "sraw.", XRC(31,792,1), X_MASK, PPCCOM, { RA, RS, RB } },
4219{ "sra.", XRC(31,792,1), X_MASK, PWRCOM, { RA, RS, RB } },
4220
4221{ "srad", XRC(31,794,0), X_MASK, PPC64, { RA, RS, RB } },
4222{ "srad.", XRC(31,794,1), X_MASK, PPC64, { RA, RS, RB } },
4223
4224{ "lhbrxe", X(31,798), X_MASK, BOOKE64, { RT, RA, RB } },
4225
4226{ "ldxe", X(31,799), X_MASK, BOOKE64, { RT, RA, RB } },
4227{ "lduxe", X(31,831), X_MASK, BOOKE64, { RT, RA, RB } },
4228
4229{ "rac", X(31,818), X_MASK, PWRCOM, { RT, RA, RB } },
4230
4231{ "dss", XDSS(31,822,0), XDSS_MASK, PPCVEC, { STRM } },
4232{ "dssall", XDSS(31,822,1), XDSS_MASK, PPCVEC, { 0 } },
4233
4234{ "srawi", XRC(31,824,0), X_MASK, PPCCOM, { RA, RS, SH } },
4235{ "srai", XRC(31,824,0), X_MASK, PWRCOM, { RA, RS, SH } },
4236{ "srawi.", XRC(31,824,1), X_MASK, PPCCOM, { RA, RS, SH } },
4237{ "srai.", XRC(31,824,1), X_MASK, PWRCOM, { RA, RS, SH } },
4238
4239{ "slbmfev", X(31,851), XRA_MASK, PPC64, { RT, RB } },
4240
4241{ "mbar", X(31,854), X_MASK, BOOKE, { MO } },
4242{ "eieio", X(31,854), 0xffffffff, PPC, { 0 } },
4243
4244{ "tlbsx", XRC(31,914,0), X_MASK, BOOKE, { RA, RB } },
4245{ "tlbsx", XRC(31,914,0), X_MASK, PPC403, { RT, RA, RB } },
4246{ "tlbsx.", XRC(31,914,1), X_MASK, BOOKE, { RA, RB } },
4247{ "tlbsx.", XRC(31,914,1), X_MASK, PPC403, { RT, RA, RB } },
4248{ "tlbsxe", XRC(31,915,0), X_MASK, BOOKE64, { RA, RB } },
4249{ "tlbsxe.", XRC(31,915,1), X_MASK, BOOKE64, { RA, RB } },
4250
4251{ "slbmfee", X(31,915), XRA_MASK, PPC64, { RT, RB } },
4252
4253{ "sthbrx", X(31,918), X_MASK, COM, { RS, RA, RB } },
4254
4255{ "sraq", XRC(31,920,0), X_MASK, M601, { RA, RS, RB } },
4256{ "sraq.", XRC(31,920,1), X_MASK, M601, { RA, RS, RB } },
4257
4258{ "srea", XRC(31,921,0), X_MASK, M601, { RA, RS, RB } },
4259{ "srea.", XRC(31,921,1), X_MASK, M601, { RA, RS, RB } },
4260
4261{ "extsh", XRC(31,922,0), XRB_MASK, PPCCOM, { RA, RS } },
4262{ "exts", XRC(31,922,0), XRB_MASK, PWRCOM, { RA, RS } },
4263{ "extsh.", XRC(31,922,1), XRB_MASK, PPCCOM, { RA, RS } },
4264{ "exts.", XRC(31,922,1), XRB_MASK, PWRCOM, { RA, RS } },
4265
4266{ "sthbrxe", X(31,926), X_MASK, BOOKE64, { RS, RA, RB } },
4267
4268{ "stdxe", X(31,927), X_MASK, BOOKE64, { RS, RA, RB } },
4269
4270{ "tlbrehi", XTLB(31,946,0), XTLB_MASK, PPC403, { RT, RA } },
4271{ "tlbrelo", XTLB(31,946,1), XTLB_MASK, PPC403, { RT, RA } },
4272{ "tlbre", X(31,946), X_MASK, BOOKE, { 0 } },
4273{ "tlbre", X(31,946), X_MASK, PPC403, { RS, RA, SH } },
4274
4275{ "sraiq", XRC(31,952,0), X_MASK, M601, { RA, RS, SH } },
4276{ "sraiq.", XRC(31,952,1), X_MASK, M601, { RA, RS, SH } },
4277
4278{ "extsb", XRC(31,954,0), XRB_MASK, PPC, { RA, RS} },
4279{ "extsb.", XRC(31,954,1), XRB_MASK, PPC, { RA, RS} },
4280
4281{ "stduxe", X(31,959), X_MASK, BOOKE64, { RS, RAS, RB } },
4282
4283{ "iccci", X(31,966), XRT_MASK, PPC403|PPC440, { RA, RB } },
4284
4285{ "tlbwehi", XTLB(31,978,0), XTLB_MASK, PPC403, { RT, RA } },
4286{ "tlbwelo", XTLB(31,978,1), XTLB_MASK, PPC403, { RT, RA } },
4287{ "tlbwe", X(31,978), X_MASK, BOOKE, { 0 } },
4288{ "tlbwe", X(31,978), X_MASK, PPC403, { RS, RA, SH } },
4289{ "tlbld", X(31,978), XRTRA_MASK, PPC, { RB } },
4290
4291{ "icbi", X(31,982), XRT_MASK, PPC, { RA, RB } },
4292
4293{ "stfiwx", X(31,983), X_MASK, PPC, { FRS, RA, RB } },
4294
4295{ "extsw", XRC(31,986,0), XRB_MASK, PPC64 | BOOKE64,{ RA, RS } },
4296{ "extsw.", XRC(31,986,1), XRB_MASK, PPC64, { RA, RS } },
4297
4298{ "icread", X(31,998), XRT_MASK, PPC403|PPC440, { RA, RB } },
4299
4300{ "icbie", X(31,990), XRT_MASK, BOOKE64, { RA, RB } },
4301{ "stfiwxe", X(31,991), X_MASK, BOOKE64, { FRS, RA, RB } },
4302
4303{ "tlbli", X(31,1010), XRTRA_MASK, PPC, { RB } },
4304
4305{ "dcbz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
4306{ "dclz", X(31,1014), XRT_MASK, PPC, { RA, RB } },
4307
4308{ "dcbze", X(31,1022), XRT_MASK, BOOKE64, { RA, RB } },
4309
4310{ "lvebx", X(31, 7), X_MASK, PPCVEC, { VD, RA, RB } },
4311{ "lvehx", X(31, 39), X_MASK, PPCVEC, { VD, RA, RB } },
4312{ "lvewx", X(31, 71), X_MASK, PPCVEC, { VD, RA, RB } },
4313{ "lvsl", X(31, 6), X_MASK, PPCVEC, { VD, RA, RB } },
4314{ "lvsr", X(31, 38), X_MASK, PPCVEC, { VD, RA, RB } },
4315{ "lvx", X(31, 103), X_MASK, PPCVEC, { VD, RA, RB } },
4316{ "lvxl", X(31, 359), X_MASK, PPCVEC, { VD, RA, RB } },
4317{ "stvebx", X(31, 135), X_MASK, PPCVEC, { VS, RA, RB } },
4318{ "stvehx", X(31, 167), X_MASK, PPCVEC, { VS, RA, RB } },
4319{ "stvewx", X(31, 199), X_MASK, PPCVEC, { VS, RA, RB } },
4320{ "stvx", X(31, 231), X_MASK, PPCVEC, { VS, RA, RB } },
4321{ "stvxl", X(31, 487), X_MASK, PPCVEC, { VS, RA, RB } },
4322
4323{ "lwz", OP(32), OP_MASK, PPCCOM, { RT, D, RA } },
4324{ "l", OP(32), OP_MASK, PWRCOM, { RT, D, RA } },
4325
4326{ "lwzu", OP(33), OP_MASK, PPCCOM, { RT, D, RAL } },
4327{ "lu", OP(33), OP_MASK, PWRCOM, { RT, D, RA } },
4328
4329{ "lbz", OP(34), OP_MASK, COM, { RT, D, RA } },
4330
4331{ "lbzu", OP(35), OP_MASK, COM, { RT, D, RAL } },
4332
4333{ "stw", OP(36), OP_MASK, PPCCOM, { RS, D, RA } },
4334{ "st", OP(36), OP_MASK, PWRCOM, { RS, D, RA } },
4335
4336{ "stwu", OP(37), OP_MASK, PPCCOM, { RS, D, RAS } },
4337{ "stu", OP(37), OP_MASK, PWRCOM, { RS, D, RA } },
4338
4339{ "stb", OP(38), OP_MASK, COM, { RS, D, RA } },
4340
4341{ "stbu", OP(39), OP_MASK, COM, { RS, D, RAS } },
4342
4343{ "lhz", OP(40), OP_MASK, COM, { RT, D, RA } },
4344
4345{ "lhzu", OP(41), OP_MASK, COM, { RT, D, RAL } },
4346
4347{ "lha", OP(42), OP_MASK, COM, { RT, D, RA } },
4348
4349{ "lhau", OP(43), OP_MASK, COM, { RT, D, RAL } },
4350
4351{ "sth", OP(44), OP_MASK, COM, { RS, D, RA } },
4352
4353{ "sthu", OP(45), OP_MASK, COM, { RS, D, RAS } },
4354
4355{ "lmw", OP(46), OP_MASK, PPCCOM, { RT, D, RAM } },
4356{ "lm", OP(46), OP_MASK, PWRCOM, { RT, D, RA } },
4357
4358{ "stmw", OP(47), OP_MASK, PPCCOM, { RS, D, RA } },
4359{ "stm", OP(47), OP_MASK, PWRCOM, { RS, D, RA } },
4360
4361{ "lfs", OP(48), OP_MASK, COM, { FRT, D, RA } },
4362
4363{ "lfsu", OP(49), OP_MASK, COM, { FRT, D, RAS } },
4364
4365{ "lfd", OP(50), OP_MASK, COM, { FRT, D, RA } },
4366
4367{ "lfdu", OP(51), OP_MASK, COM, { FRT, D, RAS } },
4368
4369{ "stfs", OP(52), OP_MASK, COM, { FRS, D, RA } },
4370
4371{ "stfsu", OP(53), OP_MASK, COM, { FRS, D, RAS } },
4372
4373{ "stfd", OP(54), OP_MASK, COM, { FRS, D, RA } },
4374
4375{ "stfdu", OP(55), OP_MASK, COM, { FRS, D, RAS } },
4376
4377{ "lq", OP(56), OP_MASK, POWER4, { RTQ, DQ, RAQ } },
4378
4379{ "lfq", OP(56), OP_MASK, POWER2, { FRT, D, RA } },
4380
4381{ "lfqu", OP(57), OP_MASK, POWER2, { FRT, D, RA } },
4382
4383{ "lbze", DEO(58,0), DE_MASK, BOOKE64, { RT, DE, RA } },
4384{ "lbzue", DEO(58,1), DE_MASK, BOOKE64, { RT, DE, RAL } },
4385{ "lhze", DEO(58,2), DE_MASK, BOOKE64, { RT, DE, RA } },
4386{ "lhzue", DEO(58,3), DE_MASK, BOOKE64, { RT, DE, RAL } },
4387{ "lhae", DEO(58,4), DE_MASK, BOOKE64, { RT, DE, RA } },
4388{ "lhaue", DEO(58,5), DE_MASK, BOOKE64, { RT, DE, RAL } },
4389{ "lwze", DEO(58,6), DE_MASK, BOOKE64, { RT, DE, RA } },
4390{ "lwzue", DEO(58,7), DE_MASK, BOOKE64, { RT, DE, RAL } },
4391{ "stbe", DEO(58,8), DE_MASK, BOOKE64, { RS, DE, RA } },
4392{ "stbue", DEO(58,9), DE_MASK, BOOKE64, { RS, DE, RAS } },
4393{ "sthe", DEO(58,10), DE_MASK, BOOKE64, { RS, DE, RA } },
4394{ "sthue", DEO(58,11), DE_MASK, BOOKE64, { RS, DE, RAS } },
4395{ "stwe", DEO(58,14), DE_MASK, BOOKE64, { RS, DE, RA } },
4396{ "stwue", DEO(58,15), DE_MASK, BOOKE64, { RS, DE, RAS } },
4397
4398{ "ld", DSO(58,0), DS_MASK, PPC64, { RT, DS, RA } },
4399
4400{ "ldu", DSO(58,1), DS_MASK, PPC64, { RT, DS, RAL } },
4401
4402{ "lwa", DSO(58,2), DS_MASK, PPC64, { RT, DS, RA } },
4403
4404{ "fdivs", A(59,18,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4405{ "fdivs.", A(59,18,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4406
4407{ "fsubs", A(59,20,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4408{ "fsubs.", A(59,20,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4409
4410{ "fadds", A(59,21,0), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4411{ "fadds.", A(59,21,1), AFRC_MASK, PPC, { FRT, FRA, FRB } },
4412
4413{ "fsqrts", A(59,22,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
4414{ "fsqrts.", A(59,22,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
4415
4416{ "fres", A(59,24,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
4417{ "fres.", A(59,24,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
4418
4419{ "fmuls", A(59,25,0), AFRB_MASK, PPC, { FRT, FRA, FRC } },
4420{ "fmuls.", A(59,25,1), AFRB_MASK, PPC, { FRT, FRA, FRC } },
4421
4422{ "fmsubs", A(59,28,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4423{ "fmsubs.", A(59,28,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4424
4425{ "fmadds", A(59,29,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4426{ "fmadds.", A(59,29,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4427
4428{ "fnmsubs", A(59,30,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4429{ "fnmsubs.",A(59,30,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4430
4431{ "fnmadds", A(59,31,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4432{ "fnmadds.",A(59,31,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4433
4434{ "stfq", OP(60), OP_MASK, POWER2, { FRS, D, RA } },
4435
4436{ "stfqu", OP(61), OP_MASK, POWER2, { FRS, D, RA } },
4437
4438{ "lde", DEO(62,0), DE_MASK, BOOKE64, { RT, DES, RA } },
4439{ "ldue", DEO(62,1), DE_MASK, BOOKE64, { RT, DES, RA } },
4440{ "lfse", DEO(62,4), DE_MASK, BOOKE64, { FRT, DES, RA } },
4441{ "lfsue", DEO(62,5), DE_MASK, BOOKE64, { FRT, DES, RAS } },
4442{ "lfde", DEO(62,6), DE_MASK, BOOKE64, { FRT, DES, RA } },
4443{ "lfdue", DEO(62,7), DE_MASK, BOOKE64, { FRT, DES, RAS } },
4444{ "stde", DEO(62,8), DE_MASK, BOOKE64, { RS, DES, RA } },
4445{ "stdue", DEO(62,9), DE_MASK, BOOKE64, { RS, DES, RAS } },
4446{ "stfse", DEO(62,12), DE_MASK, BOOKE64, { FRS, DES, RA } },
4447{ "stfsue", DEO(62,13), DE_MASK, BOOKE64, { FRS, DES, RAS } },
4448{ "stfde", DEO(62,14), DE_MASK, BOOKE64, { FRS, DES, RA } },
4449{ "stfdue", DEO(62,15), DE_MASK, BOOKE64, { FRS, DES, RAS } },
4450
4451{ "std", DSO(62,0), DS_MASK, PPC64, { RS, DS, RA } },
4452
4453{ "stdu", DSO(62,1), DS_MASK, PPC64, { RS, DS, RAS } },
4454
4455{ "stq", DSO(62,2), DS_MASK, POWER4, { RSQ, DS, RA } },
4456
4457{ "fcmpu", X(63,0), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
4458
4459{ "frsp", XRC(63,12,0), XRA_MASK, COM, { FRT, FRB } },
4460{ "frsp.", XRC(63,12,1), XRA_MASK, COM, { FRT, FRB } },
4461
4462{ "fctiw", XRC(63,14,0), XRA_MASK, PPCCOM, { FRT, FRB } },
4463{ "fcir", XRC(63,14,0), XRA_MASK, POWER2, { FRT, FRB } },
4464{ "fctiw.", XRC(63,14,1), XRA_MASK, PPCCOM, { FRT, FRB } },
4465{ "fcir.", XRC(63,14,1), XRA_MASK, POWER2, { FRT, FRB } },
4466
4467{ "fctiwz", XRC(63,15,0), XRA_MASK, PPCCOM, { FRT, FRB } },
4468{ "fcirz", XRC(63,15,0), XRA_MASK, POWER2, { FRT, FRB } },
4469{ "fctiwz.", XRC(63,15,1), XRA_MASK, PPCCOM, { FRT, FRB } },
4470{ "fcirz.", XRC(63,15,1), XRA_MASK, POWER2, { FRT, FRB } },
4471
4472{ "fdiv", A(63,18,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4473{ "fd", A(63,18,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4474{ "fdiv.", A(63,18,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4475{ "fd.", A(63,18,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4476
4477{ "fsub", A(63,20,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4478{ "fs", A(63,20,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4479{ "fsub.", A(63,20,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4480{ "fs.", A(63,20,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4481
4482{ "fadd", A(63,21,0), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4483{ "fa", A(63,21,0), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4484{ "fadd.", A(63,21,1), AFRC_MASK, PPCCOM, { FRT, FRA, FRB } },
4485{ "fa.", A(63,21,1), AFRC_MASK, PWRCOM, { FRT, FRA, FRB } },
4486
4487{ "fsqrt", A(63,22,0), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } },
4488{ "fsqrt.", A(63,22,1), AFRAFRC_MASK, PPCPWR2, { FRT, FRB } },
4489
4490{ "fsel", A(63,23,0), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4491{ "fsel.", A(63,23,1), A_MASK, PPC, { FRT,FRA,FRC,FRB } },
4492
4493{ "fmul", A(63,25,0), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
4494{ "fm", A(63,25,0), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
4495{ "fmul.", A(63,25,1), AFRB_MASK, PPCCOM, { FRT, FRA, FRC } },
4496{ "fm.", A(63,25,1), AFRB_MASK, PWRCOM, { FRT, FRA, FRC } },
4497
4498{ "frsqrte", A(63,26,0), AFRAFRC_MASK, PPC, { FRT, FRB } },
4499{ "frsqrte.",A(63,26,1), AFRAFRC_MASK, PPC, { FRT, FRB } },
4500
4501{ "fmsub", A(63,28,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4502{ "fms", A(63,28,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4503{ "fmsub.", A(63,28,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4504{ "fms.", A(63,28,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4505
4506{ "fmadd", A(63,29,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4507{ "fma", A(63,29,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4508{ "fmadd.", A(63,29,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4509{ "fma.", A(63,29,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4510
4511{ "fnmsub", A(63,30,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4512{ "fnms", A(63,30,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4513{ "fnmsub.", A(63,30,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4514{ "fnms.", A(63,30,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4515
4516{ "fnmadd", A(63,31,0), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4517{ "fnma", A(63,31,0), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4518{ "fnmadd.", A(63,31,1), A_MASK, PPCCOM, { FRT,FRA,FRC,FRB } },
4519{ "fnma.", A(63,31,1), A_MASK, PWRCOM, { FRT,FRA,FRC,FRB } },
4520
4521{ "fcmpo", X(63,32), X_MASK|(3<<21), COM, { BF, FRA, FRB } },
4522
4523{ "mtfsb1", XRC(63,38,0), XRARB_MASK, COM, { BT } },
4524{ "mtfsb1.", XRC(63,38,1), XRARB_MASK, COM, { BT } },
4525
4526{ "fneg", XRC(63,40,0), XRA_MASK, COM, { FRT, FRB } },
4527{ "fneg.", XRC(63,40,1), XRA_MASK, COM, { FRT, FRB } },
4528
4529{ "mcrfs", X(63,64), XRB_MASK|(3<<21)|(3<<16), COM, { BF, BFA } },
4530
4531{ "mtfsb0", XRC(63,70,0), XRARB_MASK, COM, { BT } },
4532{ "mtfsb0.", XRC(63,70,1), XRARB_MASK, COM, { BT } },
4533
4534{ "fmr", XRC(63,72,0), XRA_MASK, COM, { FRT, FRB } },
4535{ "fmr.", XRC(63,72,1), XRA_MASK, COM, { FRT, FRB } },
4536
4537{ "mtfsfi", XRC(63,134,0), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
4538{ "mtfsfi.", XRC(63,134,1), XRA_MASK|(3<<21)|(1<<11), COM, { BF, U } },
4539
4540{ "fnabs", XRC(63,136,0), XRA_MASK, COM, { FRT, FRB } },
4541{ "fnabs.", XRC(63,136,1), XRA_MASK, COM, { FRT, FRB } },
4542
4543{ "fabs", XRC(63,264,0), XRA_MASK, COM, { FRT, FRB } },
4544{ "fabs.", XRC(63,264,1), XRA_MASK, COM, { FRT, FRB } },
4545
4546{ "mffs", XRC(63,583,0), XRARB_MASK, COM, { FRT } },
4547{ "mffs.", XRC(63,583,1), XRARB_MASK, COM, { FRT } },
4548
4549{ "mtfsf", XFL(63,711,0), XFL_MASK, COM, { FLM, FRB } },
4550{ "mtfsf.", XFL(63,711,1), XFL_MASK, COM, { FLM, FRB } },
4551
4552{ "fctid", XRC(63,814,0), XRA_MASK, PPC64, { FRT, FRB } },
4553{ "fctid.", XRC(63,814,1), XRA_MASK, PPC64, { FRT, FRB } },
4554
4555{ "fctidz", XRC(63,815,0), XRA_MASK, PPC64, { FRT, FRB } },
4556{ "fctidz.", XRC(63,815,1), XRA_MASK, PPC64, { FRT, FRB } },
4557
4558{ "fcfid", XRC(63,846,0), XRA_MASK, PPC64, { FRT, FRB } },
4559{ "fcfid.", XRC(63,846,1), XRA_MASK, PPC64, { FRT, FRB } },
4560
4561};
4562
4563const int powerpc_num_opcodes =
4564 sizeof (powerpc_opcodes) / sizeof (powerpc_opcodes[0]);
4565
4566/* The macro table. This is only used by the assembler. */
4567
4568/* The expressions of the form (-x ! 31) & (x | 31) have the value 0
4569 when x=0; 32-x when x is between 1 and 31; are negative if x is
4570 negative; and are 32 or more otherwise. This is what you want
4571 when, for instance, you are emulating a right shift by a
4572 rotate-left-and-mask, because the underlying instructions support
4573 shifts of size 0 but not shifts of size 32. By comparison, when
4574 extracting x bits from some word you want to use just 32-x, because
4575 the underlying instructions don't support extracting 0 bits but do
4576 support extracting the whole word (32 bits in this case). */
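/*
 * Illustration (editorial sketch, not part of the original file): in
 * GAS expression syntax `!' is bitwise or-not, so the operand
 * (-x ! 31) & (x | 31) above is (-x | ~31) & (x | 31) in C.  (The
 * related `<>' operator used by extrwi below is not-equal, yielding
 * -1 for true and 0 for false, which maps a rotate count of 32 down
 * to the supported 0.)  A standalone check of the values claimed
 * above:
 */
#include <assert.h>

static void check_shift_count_identity(void)
{
	int x;

	for (x = 0; x <= 31; x++) {
		int v = (-x | ~31) & (x | 31);

		/* 0 when x == 0; 32 - x for x in 1..31 */
		assert(v == (x == 0 ? 0 : 32 - x));
	}
}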
4577
4578const struct powerpc_macro powerpc_macros[] = {
4579{ "extldi", 4, PPC64, "rldicr %0,%1,%3,(%2)-1" },
4580{ "extldi.", 4, PPC64, "rldicr. %0,%1,%3,(%2)-1" },
4581{ "extrdi", 4, PPC64, "rldicl %0,%1,(%2)+(%3),64-(%2)" },
4582{ "extrdi.", 4, PPC64, "rldicl. %0,%1,(%2)+(%3),64-(%2)" },
4583{ "insrdi", 4, PPC64, "rldimi %0,%1,64-((%2)+(%3)),%3" },
4584{ "insrdi.", 4, PPC64, "rldimi. %0,%1,64-((%2)+(%3)),%3" },
4585{ "rotrdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),0" },
4586{ "rotrdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),0" },
4587{ "sldi", 3, PPC64, "rldicr %0,%1,%2,63-(%2)" },
4588{ "sldi.", 3, PPC64, "rldicr. %0,%1,%2,63-(%2)" },
4589{ "srdi", 3, PPC64, "rldicl %0,%1,(-(%2)!63)&((%2)|63),%2" },
4590{ "srdi.", 3, PPC64, "rldicl. %0,%1,(-(%2)!63)&((%2)|63),%2" },
4591{ "clrrdi", 3, PPC64, "rldicr %0,%1,0,63-(%2)" },
4592{ "clrrdi.", 3, PPC64, "rldicr. %0,%1,0,63-(%2)" },
4593{ "clrlsldi",4, PPC64, "rldic %0,%1,%3,(%2)-(%3)" },
4594{ "clrlsldi.",4, PPC64, "rldic. %0,%1,%3,(%2)-(%3)" },
4595
4596{ "extlwi", 4, PPCCOM, "rlwinm %0,%1,%3,0,(%2)-1" },
4597{ "extlwi.", 4, PPCCOM, "rlwinm. %0,%1,%3,0,(%2)-1" },
4598{ "extrwi", 4, PPCCOM, "rlwinm %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" },
4599{ "extrwi.", 4, PPCCOM, "rlwinm. %0,%1,((%2)+(%3))&((%2)+(%3)<>32),32-(%2),31" },
4600{ "inslwi", 4, PPCCOM, "rlwimi %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1" },
4601{ "inslwi.", 4, PPCCOM, "rlwimi. %0,%1,(-(%3)!31)&((%3)|31),%3,(%2)+(%3)-1"},
4602{ "insrwi", 4, PPCCOM, "rlwimi %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1" },
4603{ "insrwi.", 4, PPCCOM, "rlwimi. %0,%1,32-((%2)+(%3)),%3,(%2)+(%3)-1"},
4604{ "rotrwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),0,31" },
4605{ "rotrwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),0,31" },
4606{ "slwi", 3, PPCCOM, "rlwinm %0,%1,%2,0,31-(%2)" },
4607{ "sli", 3, PWRCOM, "rlinm %0,%1,%2,0,31-(%2)" },
4608{ "slwi.", 3, PPCCOM, "rlwinm. %0,%1,%2,0,31-(%2)" },
4609{ "sli.", 3, PWRCOM, "rlinm. %0,%1,%2,0,31-(%2)" },
4610{ "srwi", 3, PPCCOM, "rlwinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
4611{ "sri", 3, PWRCOM, "rlinm %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
4612{ "srwi.", 3, PPCCOM, "rlwinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
4613{ "sri.", 3, PWRCOM, "rlinm. %0,%1,(-(%2)!31)&((%2)|31),%2,31" },
4614{ "clrrwi", 3, PPCCOM, "rlwinm %0,%1,0,0,31-(%2)" },
4615{ "clrrwi.", 3, PPCCOM, "rlwinm. %0,%1,0,0,31-(%2)" },
4616{ "clrlslwi",4, PPCCOM, "rlwinm %0,%1,%3,(%2)-(%3),31-(%3)" },
4617{ "clrlslwi.",4, PPCCOM, "rlwinm. %0,%1,%3,(%2)-(%3),31-(%3)" },
4618};
4619
4620const int powerpc_num_macros =
4621 sizeof (powerpc_macros) / sizeof (powerpc_macros[0]);
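/*
 * Illustration (editorial sketch, not part of the original file):
 * the assembler consumes the format strings above by replacing each
 * %N with the text of operand N, so "sldi r3,r4,8" expands to
 * "rldicr r3,r4,8,63-(8)".  A minimal standalone version of that
 * substitution:
 */
#include <stdio.h>

static void expand_macro(const char *format, const char *const *operands)
{
	const char *s;

	for (s = format; *s != '\0'; s++) {
		if (*s == '%' && s[1] >= '0' && s[1] <= '9')
			fputs(operands[*++s - '0'], stdout);
		else
			putchar(*s);
	}
	putchar('\n');
}

/* e.g. expand_macro("rldicr %0,%1,%2,63-(%2)",
   (const char *const []){ "r3", "r4", "8" })
   prints "rldicr r3,r4,8,63-(8)". */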
diff --git a/arch/ppc64/xmon/ppc.h b/arch/ppc64/xmon/ppc.h
deleted file mode 100644
index 342237e8dd69..000000000000
--- a/arch/ppc64/xmon/ppc.h
+++ /dev/null
@@ -1,307 +0,0 @@
1/* ppc.h -- Header file for PowerPC opcode table
2 Copyright 1994, 1995, 1999, 2000, 2001, 2002, 2003
3 Free Software Foundation, Inc.
4 Written by Ian Lance Taylor, Cygnus Support
5
6This file is part of GDB, GAS, and the GNU binutils.
7
8GDB, GAS, and the GNU binutils are free software; you can redistribute
9them and/or modify them under the terms of the GNU General Public
10License as published by the Free Software Foundation; either version
111, or (at your option) any later version.
12
13GDB, GAS, and the GNU binutils are distributed in the hope that they
14will be useful, but WITHOUT ANY WARRANTY; without even the implied
15warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16the GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
19along with this file; see the file COPYING. If not, write to the Free
20Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21
22#ifndef PPC_H
23#define PPC_H
24
25/* The opcode table is an array of struct powerpc_opcode. */
26
27struct powerpc_opcode
28{
29 /* The opcode name. */
30 const char *name;
31
32 /* The opcode itself. Those bits which will be filled in with
33 operands are zeroes. */
34 unsigned long opcode;
35
36 /* The opcode mask. This is used by the disassembler. This is a
37 mask containing ones indicating those bits which must match the
38 opcode field, and zeroes indicating those bits which need not
39 match (and are presumably filled in by operands). */
40 unsigned long mask;
41
42 /* One bit flags for the opcode. These are used to indicate which
43 specific processors support the instructions. The defined values
44 are listed below. */
45 unsigned long flags;
46
47 /* An array of operand codes. Each code is an index into the
48 operand table. They appear in the order in which the operands must
49 appear in assembly code, and are terminated by a zero. */
50 unsigned char operands[8];
51};
52
53/* The table itself is sorted by major opcode number, and is otherwise
54 in the order in which the disassembler should consider
55 instructions. */
56extern const struct powerpc_opcode powerpc_opcodes[];
57extern const int powerpc_num_opcodes;
58
59/* Values defined for the flags field of a struct powerpc_opcode. */
60
61/* Opcode is defined for the PowerPC architecture. */
62#define PPC_OPCODE_PPC 1
63
64/* Opcode is defined for the POWER (RS/6000) architecture. */
65#define PPC_OPCODE_POWER 2
66
67/* Opcode is defined for the POWER2 (Rios 2) architecture. */
68#define PPC_OPCODE_POWER2 4
69
70/* Opcode is only defined on 32 bit architectures. */
71#define PPC_OPCODE_32 8
72
73/* Opcode is only defined on 64 bit architectures. */
74#define PPC_OPCODE_64 0x10
75
76/* Opcode is supported by the Motorola PowerPC 601 processor. The 601
77 is assumed to support all PowerPC (PPC_OPCODE_PPC) instructions,
78 but it also supports many additional POWER instructions. */
79#define PPC_OPCODE_601 0x20
80
81/* Opcode is supported in both the Power and PowerPC architectures
82 (i.e., compiler's -mcpu=common or assembler's -mcom). */
83#define PPC_OPCODE_COMMON 0x40
84
85/* Opcode is supported for any Power or PowerPC platform (this is
86 for the assembler's -many option, and it eliminates duplicates). */
87#define PPC_OPCODE_ANY 0x80
88
89/* Opcode is supported as part of the 64-bit bridge. */
90#define PPC_OPCODE_64_BRIDGE 0x100
91
92/* Opcode is supported by Altivec Vector Unit */
93#define PPC_OPCODE_ALTIVEC 0x200
94
95/* Opcode is supported by PowerPC 403 processor. */
96#define PPC_OPCODE_403 0x400
97
98/* Opcode is supported by PowerPC BookE processor. */
99#define PPC_OPCODE_BOOKE 0x800
100
101/* Opcode is only supported by 64-bit PowerPC BookE processor. */
102#define PPC_OPCODE_BOOKE64 0x1000
103
104/* Opcode is supported by PowerPC 440 processor. */
105#define PPC_OPCODE_440 0x2000
106
107/* Opcode is only supported by Power4 architecture. */
108#define PPC_OPCODE_POWER4 0x4000
109
110/* Opcode isn't supported by Power4 architecture. */
111#define PPC_OPCODE_NOPOWER4 0x8000
112
113/* Opcode is only supported by POWERPC Classic architecture. */
114#define PPC_OPCODE_CLASSIC 0x10000
115
116/* Opcode is only supported by e500x2 Core. */
117#define PPC_OPCODE_SPE 0x20000
118
119/* Opcode is supported by e500x2 Integer select APU. */
120#define PPC_OPCODE_ISEL 0x40000
121
122/* Opcode is an e500 SPE floating point instruction. */
123#define PPC_OPCODE_EFS 0x80000
124
125/* Opcode is supported by branch locking APU. */
126#define PPC_OPCODE_BRLOCK 0x100000
127
128/* Opcode is supported by performance monitor APU. */
129#define PPC_OPCODE_PMR 0x200000
130
131/* Opcode is supported by cache locking APU. */
132#define PPC_OPCODE_CACHELCK 0x400000
133
134/* Opcode is supported by machine check APU. */
135#define PPC_OPCODE_RFMCI 0x800000
136
137/* A macro to extract the major opcode from an instruction. */
138#define PPC_OP(i) (((i) >> 26) & 0x3f)
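/*
 * Illustration (editorial, not part of the original file): the major
 * opcode occupies the top six bits of the 32-bit instruction word.
 * "mflr r0" encodes as 0x7c0802a6, an X-form instruction under major
 * opcode 31:
 */
#include <assert.h>

static void ppc_op_example(void)
{
	assert(PPC_OP(0x7c0802a6) == 31);
}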
139
140/* The operands table is an array of struct powerpc_operand. */
141
142struct powerpc_operand
143{
144 /* The number of bits in the operand. */
145 int bits;
146
147 /* How far the operand is left shifted in the instruction. */
148 int shift;
149
150 /* Insertion function. This is used by the assembler. To insert an
151 operand value into an instruction, check this field.
152
153 If it is NULL, execute
154 i |= (op & ((1 << o->bits) - 1)) << o->shift;
155 (i is the instruction which we are filling in, o is a pointer to
156 this structure, and op is the opcode value; this assumes twos
157 complement arithmetic).
158
159 If this field is not NULL, then simply call it with the
160 instruction and the operand value. It will return the new value
161 of the instruction. If the ERRMSG argument is not NULL, then if
162 the operand value is illegal, *ERRMSG will be set to a warning
163 string (the operand will be inserted in any case). If the
164 operand value is legal, *ERRMSG will be unchanged (most operands
165 can accept any value). */
166 unsigned long (*insert)
167 (unsigned long instruction, long op, int dialect, const char **errmsg);
168
169 /* Extraction function. This is used by the disassembler. To
170 extract this operand type from an instruction, check this field.
171
172 If it is NULL, compute
173 op = ((i) >> o->shift) & ((1 << o->bits) - 1);
174 if ((o->flags & PPC_OPERAND_SIGNED) != 0
175 && (op & (1 << (o->bits - 1))) != 0)
176 op -= 1 << o->bits;
177 (i is the instruction, o is a pointer to this structure, and op
178 is the result; this assumes twos complement arithmetic).
179
180 If this field is not NULL, then simply call it with the
181 instruction value. It will return the value of the operand. If
182 the INVALID argument is not NULL, *INVALID will be set to
183 non-zero if this operand type can not actually be extracted from
184 this operand (i.e., the instruction does not match). If the
185 operand is valid, *INVALID will not be changed. (A worked example of both default code paths follows this structure.) */
186 long (*extract) (unsigned long instruction, int dialect, int *invalid);
187
188 /* One bit syntax flags. */
189 unsigned long flags;
190};
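/*
 * Illustration (editorial sketch of the two default code paths
 * described above, not part of the original file): inserting a
 * signed 16-bit operand at shift 0 and extracting it again
 * round-trips, including the sign extension done for
 * PPC_OPERAND_SIGNED.
 */
static long signed16_roundtrip(long op)
{
	unsigned long i = 0;
	const int bits = 16, shift = 0;

	/* default insertion (insert == NULL) */
	i |= ((unsigned long) op & ((1UL << bits) - 1)) << shift;

	/* default extraction (extract == NULL), signed variant */
	op = (i >> shift) & ((1UL << bits) - 1);
	if (op & (1L << (bits - 1)))
		op -= 1L << bits;

	return op;		/* signed16_roundtrip(-4) == -4 */
}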
191
192/* Elements in the table are retrieved by indexing with values from
193 the operands field of the powerpc_opcodes table. */
194
195extern const struct powerpc_operand powerpc_operands[];
196
197/* Values defined for the flags field of a struct powerpc_operand. */
198
199/* This operand takes signed values. */
200#define PPC_OPERAND_SIGNED (01)
201
202/* This operand takes signed values, but also accepts a full positive
203 range of values when running in 32 bit mode. That is, if bits is
204 16, it takes any value from -0x8000 to 0xffff. In 64 bit mode,
205 this flag is ignored. */
206#define PPC_OPERAND_SIGNOPT (02)
207
208/* This operand does not actually exist in the assembler input. This
209 is used to support extended mnemonics such as mr, for which two
210 operands fields are identical. The assembler should call the
211 insert function with any op value. The disassembler should call
212 the extract function, ignore the return value, and check the value
213 placed in the valid argument. */
214#define PPC_OPERAND_FAKE (04)
215
216/* The next operand should be wrapped in parentheses rather than
217 separated from this one by a comma. This is used for the load and
218 store instructions which want their operands to look like
219 reg,displacement(reg)
220 */
221#define PPC_OPERAND_PARENS (010)
222
223/* This operand may use the symbolic names for the CR fields, which
224 are
225 lt 0 gt 1 eq 2 so 3 un 3
226 cr0 0 cr1 1 cr2 2 cr3 3
227 cr4 4 cr5 5 cr6 6 cr7 7
228 These may be combined arithmetically, as in cr2*4+gt. These are
229 only supported on the PowerPC, not the POWER. */
230#define PPC_OPERAND_CR (020)
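/* Illustration (editorial): with the names above, "cr2*4+gt"
   evaluates to 2*4 + 1 == 9, i.e. the "gt" bit of CR field 2, as it
   would appear in a BI operand such as "bt cr2*4+gt,target". */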
231
232/* This operand names a register. The disassembler uses this to print
233 register names with a leading 'r'. */
234#define PPC_OPERAND_GPR (040)
235
236/* This operand names a floating point register. The disassembler
237 prints these with a leading 'f'. */
238#define PPC_OPERAND_FPR (0100)
239
240/* This operand is a relative branch displacement. The disassembler
241 prints these symbolically if possible. */
242#define PPC_OPERAND_RELATIVE (0200)
243
244/* This operand is an absolute branch address. The disassembler
245 prints these symbolically if possible. */
246#define PPC_OPERAND_ABSOLUTE (0400)
247
248/* This operand is optional, and is zero if omitted. This is used for
249 the optional BF and L fields in the comparison instructions. The
250 assembler must count the number of operands remaining on the line,
251 and the number of operands remaining for the opcode, and decide
252 whether this operand is present or not. The disassembler should
253 print this operand out only if it is not zero. */
254#define PPC_OPERAND_OPTIONAL (01000)
255
256/* This flag is only used with PPC_OPERAND_OPTIONAL. If this operand
257 is omitted, then for the next operand use this operand value plus
258 1, ignoring the next operand field for the opcode. This wretched
259 hack is needed because the Power rotate instructions can take
260 either 4 or 5 operands. The disassembler should print this operand
261 out regardless of the PPC_OPERAND_OPTIONAL field. */
262#define PPC_OPERAND_NEXT (02000)
263
264/* This operand should be regarded as a negative number for the
265 purposes of overflow checking (i.e., the normal most negative
266 number is disallowed and one more than the normal most positive
267 number is allowed). This flag will only be set for a signed
268 operand. */
269#define PPC_OPERAND_NEGATIVE (04000)
270
271/* This operand names a vector unit register. The disassembler
272 prints these with a leading 'v'. */
273#define PPC_OPERAND_VR (010000)
274
275/* This operand is for the DS field in a DS form instruction. */
276#define PPC_OPERAND_DS (020000)
277
278/* This operand is for the DQ field in a DQ form instruction. */
279#define PPC_OPERAND_DQ (040000)
280
281/* The POWER and PowerPC assemblers use a few macros. We keep them
282 with the operands table for simplicity. The macro table is an
283 array of struct powerpc_macro. */
284
285struct powerpc_macro
286{
287 /* The macro name. */
288 const char *name;
289
290 /* The number of operands the macro takes. */
291 unsigned int operands;
292
293 /* One bit flags for the opcode. These are used to indicate which
294 specific processors support the instructions. The values are the
295 same as those for the struct powerpc_opcode flags field. */
296 unsigned long flags;
297
298 /* A format string to turn the macro into a normal instruction.
299 Each %N in the string is replaced with operand number N (zero
300 based). */
301 const char *format;
302};
303
304extern const struct powerpc_macro powerpc_macros[];
305extern const int powerpc_num_macros;
306
307#endif /* PPC_H */
diff --git a/arch/ppc64/xmon/setjmp.S b/arch/ppc64/xmon/setjmp.S
deleted file mode 100644
index 30ee643d557c..000000000000
--- a/arch/ppc64/xmon/setjmp.S
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * NOTE: assert(sizeof(buf) >= 184)
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14_GLOBAL(xmon_setjmp)
15 mflr r0
16 std r0,0(r3)
17 std r1,8(r3)
18 std r2,16(r3)
19 mfcr r0
20 std r0,24(r3)
21 std r13,32(r3)
22 std r14,40(r3)
23 std r15,48(r3)
24 std r16,56(r3)
25 std r17,64(r3)
26 std r18,72(r3)
27 std r19,80(r3)
28 std r20,88(r3)
29 std r21,96(r3)
30 std r22,104(r3)
31 std r23,112(r3)
32 std r24,120(r3)
33 std r25,128(r3)
34 std r26,136(r3)
35 std r27,144(r3)
36 std r28,152(r3)
37 std r29,160(r3)
38 std r30,168(r3)
39 std r31,176(r3)
40 li r3,0
41 blr
42
43_GLOBAL(xmon_longjmp)
44 cmpdi r4,0
45 bne 1f
46 li r4,1
471: ld r13,32(r3)
48 ld r14,40(r3)
49 ld r15,48(r3)
50 ld r16,56(r3)
51 ld r17,64(r3)
52 ld r18,72(r3)
53 ld r19,80(r3)
54 ld r20,88(r3)
55 ld r21,96(r3)
56 ld r22,104(r3)
57 ld r23,112(r3)
58 ld r24,120(r3)
59 ld r25,128(r3)
60 ld r26,136(r3)
61 ld r27,144(r3)
62 ld r28,152(r3)
63 ld r29,160(r3)
64 ld r30,168(r3)
65 ld r31,176(r3)
66 ld r0,24(r3)
67 mtcrf 56,r0
68 ld r0,0(r3)
69 ld r1,8(r3)
70 ld r2,16(r3)
71 mtlr r0
72 mr r3,r4
73 blr
diff --git a/arch/ppc64/xmon/start.c b/arch/ppc64/xmon/start.c
deleted file mode 100644
index e50c158191e1..000000000000
--- a/arch/ppc64/xmon/start.c
+++ /dev/null
@@ -1,187 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/config.h>
10#include <linux/string.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/sysrq.h>
14#include <linux/init.h>
15#include <asm/machdep.h>
16#include <asm/io.h>
17#include <asm/page.h>
18#include <asm/prom.h>
19#include <asm/processor.h>
20#include <asm/udbg.h>
21#include <asm/system.h>
22#include "nonstdio.h"
23
24#ifdef CONFIG_MAGIC_SYSRQ
25
26static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
27 struct tty_struct *tty)
28{
29 /* ensure xmon is enabled */
30 xmon_init(1);
31 debugger(pt_regs);
32}
33
34static struct sysrq_key_op sysrq_xmon_op =
35{
36 .handler = sysrq_handle_xmon,
37 .help_msg = "Xmon",
38 .action_msg = "Entering xmon",
39};
40
41static int __init setup_xmon_sysrq(void)
42{
43 register_sysrq_key('x', &sysrq_xmon_op);
44 return 0;
45}
46__initcall(setup_xmon_sysrq);
47#endif /* CONFIG_MAGIC_SYSRQ */
48
49int
50xmon_write(void *handle, void *ptr, int nb)
51{
52 return udbg_write(ptr, nb);
53}
54
55int
56xmon_read(void *handle, void *ptr, int nb)
57{
58 return udbg_read(ptr, nb);
59}
60
61int
62xmon_read_poll(void)
63{
64 if (udbg_getc_poll)
65 return udbg_getc_poll();
66 return -1;
67}
68
69FILE *xmon_stdin;
70FILE *xmon_stdout;
71
72int
73xmon_putc(int c, void *f)
74{
75 char ch = c;
76
77 if (c == '\n')
78 xmon_putc('\r', f);
79 return xmon_write(f, &ch, 1) == 1? c: -1;
80}
81
82int
83xmon_putchar(int c)
84{
85 return xmon_putc(c, xmon_stdout);
86}
87
88int
89xmon_fputs(char *str, void *f)
90{
91 int n = strlen(str);
92
93 return xmon_write(f, str, n) == n? 0: -1;
94}
95
96int
97xmon_readchar(void)
98{
99 char ch;
100
101 for (;;) {
102 switch (xmon_read(xmon_stdin, &ch, 1)) {
103 case 1:
104 return ch;
105 case -1:
106 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
107 return -1;
108 }
109 }
110}
111
112static char line[256];
113static char *lineptr;
114static int lineleft;
115
116int
117xmon_getchar(void)
118{
119 int c;
120
121 if (lineleft == 0) {
122 lineptr = line;
123 for (;;) {
124 c = xmon_readchar();
125 if (c == -1 || c == 4)
126 break;
127 if (c == '\r' || c == '\n') {
128 *lineptr++ = '\n';
129 xmon_putchar('\n');
130 break;
131 }
132 switch (c) {
133 case 0177:
134 case '\b':
135 if (lineptr > line) {
136 xmon_putchar('\b');
137 xmon_putchar(' ');
138 xmon_putchar('\b');
139 --lineptr;
140 }
141 break;
142 case 'U' & 0x1F:
143 while (lineptr > line) {
144 xmon_putchar('\b');
145 xmon_putchar(' ');
146 xmon_putchar('\b');
147 --lineptr;
148 }
149 break;
150 default:
151 if (lineptr >= &line[sizeof(line) - 1])
152 xmon_putchar('\a');
153 else {
154 xmon_putchar(c);
155 *lineptr++ = c;
156 }
157 }
158 }
159 lineleft = lineptr - line;
160 lineptr = line;
161 }
162 if (lineleft == 0)
163 return -1;
164 --lineleft;
165 return *lineptr++;
166}
167
168char *
169xmon_fgets(char *str, int nb, void *f)
170{
171 char *p;
172 int c;
173
174 for (p = str; p < str + nb - 1; ) {
175 c = xmon_getchar();
176 if (c == -1) {
177 if (p == str)
178 return NULL;
179 break;
180 }
181 *p++ = c;
182 if (c == '\n')
183 break;
184 }
185 *p = 0;
186 return str;
187}
diff --git a/arch/ppc64/xmon/subr_prf.c b/arch/ppc64/xmon/subr_prf.c
deleted file mode 100644
index 5242bd7d0959..000000000000
--- a/arch/ppc64/xmon/subr_prf.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Written by Cort Dougan to replace the version originally used
3 * by Paul Mackerras, which came from NetBSD and thus had copyright
4 * conflicts with Linux.
5 *
6 * This file makes liberal use of the standard linux utility
7 * routines to reduce the size of the binary. We assume we can
8 * trust some parts of Linux inside the debugger.
9 * -- Cort (cort@cs.nmt.edu)
10 *
11 * Copyright (C) 1999 Cort Dougan.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <stdarg.h>
22#include "nonstdio.h"
23
24extern int xmon_write(void *, void *, int);
25
26void
27xmon_vfprintf(void *f, const char *fmt, va_list ap)
28{
29 static char xmon_buf[2048];
30 int n;
31
32 n = vsprintf(xmon_buf, fmt, ap);
33 xmon_write(f, xmon_buf, n);
34}
35
36void
37xmon_printf(const char *fmt, ...)
38{
39 va_list ap;
40
41 va_start(ap, fmt);
42 xmon_vfprintf(stdout, fmt, ap);
43 va_end(ap);
44}
45
46void
47xmon_fprintf(void *f, const char *fmt, ...)
48{
49 va_list ap;
50
51 va_start(ap, fmt);
52 xmon_vfprintf(f, fmt, ap);
53 va_end(ap);
54}
55
diff --git a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c
deleted file mode 100644
index 74e63a886a69..000000000000
--- a/arch/ppc64/xmon/xmon.c
+++ /dev/null
@@ -1,2514 +0,0 @@
1/*
2 * Routines providing a simple monitor for use on the PowerMac.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <linux/errno.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/mm.h>
16#include <linux/reboot.h>
17#include <linux/delay.h>
18#include <linux/kallsyms.h>
19#include <linux/cpumask.h>
20
21#include <asm/ptrace.h>
22#include <asm/string.h>
23#include <asm/prom.h>
24#include <asm/machdep.h>
25#include <asm/processor.h>
26#include <asm/pgtable.h>
27#include <asm/mmu.h>
28#include <asm/mmu_context.h>
29#include <asm/paca.h>
30#include <asm/ppcdebug.h>
31#include <asm/cputable.h>
32#include <asm/rtas.h>
33#include <asm/sstep.h>
34#include <asm/bug.h>
35#include <asm/hvcall.h>
36
37#include "nonstdio.h"
38#include "privinst.h"
39
40#define scanhex xmon_scanhex
41#define skipbl xmon_skipbl
42
43#ifdef CONFIG_SMP
44cpumask_t cpus_in_xmon = CPU_MASK_NONE;
45static unsigned long xmon_taken = 1;
46static int xmon_owner;
47static int xmon_gate;
48#endif /* CONFIG_SMP */
49
50static unsigned long in_xmon = 0;
51
52static unsigned long adrs;
53static int size = 1;
54#define MAX_DUMP (128 * 1024)
55static unsigned long ndump = 64;
56static unsigned long nidump = 16;
57static unsigned long ncsum = 4096;
58static int termch;
59static char tmpstr[128];
60
61#define JMP_BUF_LEN (184/sizeof(long))
62static long bus_error_jmp[JMP_BUF_LEN];
63static int catch_memory_errors;
64static long *xmon_fault_jmp[NR_CPUS];
65#define setjmp xmon_setjmp
66#define longjmp xmon_longjmp
67
68/* Breakpoint stuff */
69struct bpt {
70 unsigned long address;
71 unsigned int instr[2];
72 atomic_t ref_count;
73 int enabled;
74 unsigned long pad;
75};
76
77/* Bits in bpt.enabled */
78#define BP_IABR_TE 1 /* IABR translation enabled */
79#define BP_IABR 2
80#define BP_TRAP 8
81#define BP_DABR 0x10
82
83#define NBPTS 256
84static struct bpt bpts[NBPTS];
85static struct bpt dabr;
86static struct bpt *iabr;
87static unsigned bpinstr = 0x7fe00008; /* trap */
88
89#define BP_NUM(bp) ((bp) - bpts + 1)
90
91/* Prototypes */
92static int cmds(struct pt_regs *);
93static int mread(unsigned long, void *, int);
94static int mwrite(unsigned long, void *, int);
95static int handle_fault(struct pt_regs *);
96static void byterev(unsigned char *, int);
97static void memex(void);
98static int bsesc(void);
99static void dump(void);
100static void prdump(unsigned long, long);
101static int ppc_inst_dump(unsigned long, long, int);
102void print_address(unsigned long);
103static void backtrace(struct pt_regs *);
104static void excprint(struct pt_regs *);
105static void prregs(struct pt_regs *);
106static void memops(int);
107static void memlocate(void);
108static void memzcan(void);
109static void memdiffs(unsigned char *, unsigned char *, unsigned, unsigned);
110int skipbl(void);
111int scanhex(unsigned long *valp);
112static void scannl(void);
113static int hexdigit(int);
114void getstring(char *, int);
115static void flush_input(void);
116static int inchar(void);
117static void take_input(char *);
118static unsigned long read_spr(int);
119static void write_spr(int, unsigned long);
120static void super_regs(void);
121static void remove_bpts(void);
122static void insert_bpts(void);
123static void remove_cpu_bpts(void);
124static void insert_cpu_bpts(void);
125static struct bpt *at_breakpoint(unsigned long pc);
126static struct bpt *in_breakpoint_table(unsigned long pc, unsigned long *offp);
127static int do_step(struct pt_regs *);
128static void bpt_cmds(void);
129static void cacheflush(void);
130static int cpu_cmd(void);
131static void csum(void);
132static void bootcmds(void);
133void dump_segments(void);
134static void symbol_lookup(void);
135static void xmon_print_symbol(unsigned long address, const char *mid,
136 const char *after);
137static const char *getvecname(unsigned long vec);
138
139static void debug_trace(void);
140
141extern int print_insn_powerpc(unsigned long, unsigned long, int);
142extern void printf(const char *fmt, ...);
143extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
144extern int xmon_putc(int c, void *f);
145extern int putchar(int ch);
146extern int xmon_read_poll(void);
147extern int setjmp(long *);
148extern void longjmp(long *, int);
149extern unsigned long _ASR;
150
151#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
152
153#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
154 || ('a' <= (c) && (c) <= 'f') \
155 || ('A' <= (c) && (c) <= 'F'))
156#define isalnum(c) (('0' <= (c) && (c) <= '9') \
157 || ('a' <= (c) && (c) <= 'z') \
158 || ('A' <= (c) && (c) <= 'Z'))
159#define isspace(c)	((c) == ' ' || (c) == '\t' || (c) == 10 || (c) == 13 || (c) == 0)
160
161static char *help_string = "\
162Commands:\n\
163 b show breakpoints\n\
164 bd set data breakpoint\n\
165 bi set instruction breakpoint\n\
166 bc clear breakpoint\n"
167#ifdef CONFIG_SMP
168 "\
169 c print cpus stopped in xmon\n\
170 c# try to switch to cpu number h (in hex)\n"
171#endif
172 "\
173 C checksum\n\
174 d dump bytes\n\
175 di dump instructions\n\
176 df dump float values\n\
177 dd dump double values\n\
178 e print exception information\n\
179 f flush cache\n\
180 la lookup symbol+offset of specified address\n\
181 ls lookup address of specified symbol\n\
182 m examine/change memory\n\
183 mm move a block of memory\n\
184 ms set a block of memory\n\
185 md compare two blocks of memory\n\
186 ml locate a block of memory\n\
187 mz zero a block of memory\n\
188 mi show information about memory allocation\n\
189 p show the task list\n\
190 r print registers\n\
191 s single step\n\
192 S print special registers\n\
193 t print backtrace\n\
194 T Enable/Disable PPCDBG flags\n\
195 x exit monitor and recover\n\
196  X	exit monitor and don't recover\n\
197 u dump segment table or SLB\n\
198 ? help\n"
199 "\
200 zr reboot\n\
201 zh halt\n"
202;
203
204static struct pt_regs *xmon_regs;
205
206extern inline void sync(void)
207{
208 asm volatile("sync; isync");
209}
210
211/* (Ref: 64-bit PowerPC ELF ABI Supplement; Ian Lance Taylor, Zembu Labs).
212 A PPC stack frame looks like this:
213
214 High Address
215 Back Chain
216 FP reg save area
217 GP reg save area
218 Local var space
219 Parameter save area (SP+48)
220 TOC save area (SP+40)
221 link editor doubleword (SP+32)
222 compiler doubleword (SP+24)
223 LR save (SP+16)
224 CR save (SP+8)
225 Back Chain (SP+0)
226
227 Note that the LR (ret addr) may not be saved in the current frame if
228 no functions have been called from the current function.
229 */
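/*
 * Illustration (editorial sketch, not original code): walking the
 * layout above the way xmon_show_stack() further down does -- the
 * previous frame's address (back chain) is at SP+0 and the saved LR
 * for the frame above is at SP+16.
 */
static void walk_back_chain_sketch(unsigned long sp)
{
	unsigned long prev_sp, saved_lr;

	while (sp != 0
	       && mread(sp, &prev_sp, sizeof(prev_sp)) == sizeof(prev_sp)
	       && mread(sp + 16, &saved_lr, sizeof(saved_lr)) == sizeof(saved_lr)) {
		printf("frame %lx: saved lr %lx\n", sp, saved_lr);
		sp = prev_sp;		/* follow the back chain */
	}
}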
230
231/*
232 * Disable surveillance (the service processor watchdog function)
233 * while we are in xmon.
234 * XXX we should re-enable it when we leave. :)
235 */
236#define SURVEILLANCE_TOKEN 9000
237
238static inline void disable_surveillance(void)
239{
240#ifdef CONFIG_PPC_PSERIES
241 /* Since this can't be a module, args should end up below 4GB. */
242 static struct rtas_args args;
243
244 /*
245 * At this point we have got all the cpus we can into
246 * xmon, so there is hopefully no other cpu calling RTAS
247 * at the moment, even though we don't take rtas.lock.
248 * If we did try to take rtas.lock there would be a
249 * real possibility of deadlock.
250 */
251 args.token = rtas_token("set-indicator");
252 if (args.token == RTAS_UNKNOWN_SERVICE)
253 return;
254 args.nargs = 3;
255 args.nret = 1;
256 args.rets = &args.args[3];
257 args.args[0] = SURVEILLANCE_TOKEN;
258 args.args[1] = 0;
259 args.args[2] = 0;
260 enter_rtas(__pa(&args));
261#endif /* CONFIG_PPC_PSERIES */
262}
263
264#ifdef CONFIG_SMP
265static int xmon_speaker;
266
267static void get_output_lock(void)
268{
269 int me = smp_processor_id() + 0x100;
270 int last_speaker = 0, prev;
271 long timeout;
272
273 if (xmon_speaker == me)
274 return;
275 for (;;) {
276 if (xmon_speaker == 0) {
277 last_speaker = cmpxchg(&xmon_speaker, 0, me);
278 if (last_speaker == 0)
279 return;
280 }
281 timeout = 10000000;
282 while (xmon_speaker == last_speaker) {
283 if (--timeout > 0)
284 continue;
285 /* hostile takeover */
286 prev = cmpxchg(&xmon_speaker, last_speaker, me);
287 if (prev == last_speaker)
288 return;
289 break;
290 }
291 }
292}
293
294static void release_output_lock(void)
295{
296 xmon_speaker = 0;
297}
298#endif
299
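/*
 * Editorial note on the SMP protocol below (as implemented, not a
 * change): the first cpu in becomes xmon_owner, pulls the other cpus
 * into xmon_core() via debugger IPIs, then opens xmon_gate; every
 * other cpu spins in the command loop as a "secondary" until it is
 * handed control with the 'c' command or xmon exits.
 */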
300int xmon_core(struct pt_regs *regs, int fromipi)
301{
302 int cmd = 0;
303 unsigned long msr;
304 struct bpt *bp;
305 long recurse_jmp[JMP_BUF_LEN];
306 unsigned long offset;
307#ifdef CONFIG_SMP
308 int cpu;
309 int secondary;
310 unsigned long timeout;
311#endif
312
313 msr = get_msr();
314 set_msrd(msr & ~MSR_EE); /* disable interrupts */
315
316 bp = in_breakpoint_table(regs->nip, &offset);
317 if (bp != NULL) {
318 regs->nip = bp->address + offset;
319 atomic_dec(&bp->ref_count);
320 }
321
322 remove_cpu_bpts();
323
324#ifdef CONFIG_SMP
325 cpu = smp_processor_id();
326 if (cpu_isset(cpu, cpus_in_xmon)) {
327 get_output_lock();
328 excprint(regs);
329 printf("cpu 0x%x: Exception %lx %s in xmon, "
330 "returning to main loop\n",
331 cpu, regs->trap, getvecname(TRAP(regs)));
332 release_output_lock();
333 longjmp(xmon_fault_jmp[cpu], 1);
334 }
335
336 if (setjmp(recurse_jmp) != 0) {
337 if (!in_xmon || !xmon_gate) {
338 get_output_lock();
339 printf("xmon: WARNING: bad recursive fault "
340 "on cpu 0x%x\n", cpu);
341 release_output_lock();
342 goto waiting;
343 }
344 secondary = !(xmon_taken && cpu == xmon_owner);
345 goto cmdloop;
346 }
347
348 xmon_fault_jmp[cpu] = recurse_jmp;
349 cpu_set(cpu, cpus_in_xmon);
350
351 bp = NULL;
352 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF))
353 bp = at_breakpoint(regs->nip);
354 if (bp || (regs->msr & MSR_RI) == 0)
355 fromipi = 0;
356
357 if (!fromipi) {
358 get_output_lock();
359 excprint(regs);
360 if (bp) {
361 printf("cpu 0x%x stopped at breakpoint 0x%x (",
362 cpu, BP_NUM(bp));
363 xmon_print_symbol(regs->nip, " ", ")\n");
364 }
365 if ((regs->msr & MSR_RI) == 0)
366 printf("WARNING: exception is not recoverable, "
367 "can't continue\n");
368 release_output_lock();
369 }
370
371 waiting:
372 secondary = 1;
373 while (secondary && !xmon_gate) {
374 if (in_xmon == 0) {
375 if (fromipi)
376 goto leave;
377 secondary = test_and_set_bit(0, &in_xmon);
378 }
379 barrier();
380 }
381
382 if (!secondary && !xmon_gate) {
383 /* we are the first cpu to come in */
384 /* interrupt other cpu(s) */
385 int ncpus = num_online_cpus();
386
387 xmon_owner = cpu;
388 mb();
389 if (ncpus > 1) {
390 smp_send_debugger_break(MSG_ALL_BUT_SELF);
391 /* wait for other cpus to come in */
392 for (timeout = 100000000; timeout != 0; --timeout) {
393 if (cpus_weight(cpus_in_xmon) >= ncpus)
394 break;
395 barrier();
396 }
397 }
398 remove_bpts();
399 disable_surveillance();
400 /* for breakpoint or single step, print the current instr. */
401 if (bp || TRAP(regs) == 0xd00)
402 ppc_inst_dump(regs->nip, 1, 0);
403 printf("enter ? for help\n");
404 mb();
405 xmon_gate = 1;
406 barrier();
407 }
408
409 cmdloop:
410 while (in_xmon) {
411 if (secondary) {
412 if (cpu == xmon_owner) {
413 if (!test_and_set_bit(0, &xmon_taken)) {
414 secondary = 0;
415 continue;
416 }
417 /* missed it */
418 while (cpu == xmon_owner)
419 barrier();
420 }
421 barrier();
422 } else {
423 cmd = cmds(regs);
424 if (cmd != 0) {
425 /* exiting xmon */
426 insert_bpts();
427 xmon_gate = 0;
428 wmb();
429 in_xmon = 0;
430 break;
431 }
432 /* have switched to some other cpu */
433 secondary = 1;
434 }
435 }
436 leave:
437 cpu_clear(cpu, cpus_in_xmon);
438 xmon_fault_jmp[cpu] = NULL;
439
440#else
441 /* UP is simple... */
442 if (in_xmon) {
443 printf("Exception %lx %s in xmon, returning to main loop\n",
444 regs->trap, getvecname(TRAP(regs)));
445 longjmp(xmon_fault_jmp[0], 1);
446 }
447 if (setjmp(recurse_jmp) == 0) {
448 xmon_fault_jmp[0] = recurse_jmp;
449 in_xmon = 1;
450
451 excprint(regs);
452 bp = at_breakpoint(regs->nip);
453 if (bp) {
454 printf("Stopped at breakpoint %x (", BP_NUM(bp));
455 xmon_print_symbol(regs->nip, " ", ")\n");
456 }
457 if ((regs->msr & MSR_RI) == 0)
458 printf("WARNING: exception is not recoverable, "
459 "can't continue\n");
460 remove_bpts();
461 disable_surveillance();
462 /* for breakpoint or single step, print the current instr. */
463 if (bp || TRAP(regs) == 0xd00)
464 ppc_inst_dump(regs->nip, 1, 0);
465 printf("enter ? for help\n");
466 }
467
468 cmd = cmds(regs);
469
470 insert_bpts();
471 in_xmon = 0;
472#endif
473
474 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
475 bp = at_breakpoint(regs->nip);
476 if (bp != NULL) {
477 int stepped = emulate_step(regs, bp->instr[0]);
478 if (stepped == 0) {
479 regs->nip = (unsigned long) &bp->instr[0];
480 atomic_inc(&bp->ref_count);
481 } else if (stepped < 0) {
482 printf("Couldn't single-step %s instruction\n",
483 (IS_RFID(bp->instr[0])? "rfid": "mtmsrd"));
484 }
485 }
486 }
487
488 insert_cpu_bpts();
489
490 set_msrd(msr); /* restore interrupt enable */
491
492 return cmd != 'X';
493}
494
495int xmon(struct pt_regs *excp)
496{
497 struct pt_regs regs;
498
499 if (excp == NULL) {
500 /* Ok, grab regs as they are now.
501		   This won't do a particularly good job because the
502 prologue has already been executed.
503		   ToDo: We could reach back into the caller's save
504 area to do a better job of representing the
505 caller's state.
506 */
507 asm volatile ("std 0,0(%0)\n\
508 std 1,8(%0)\n\
509 std 2,16(%0)\n\
510 std 3,24(%0)\n\
511 std 4,32(%0)\n\
512 std 5,40(%0)\n\
513 std 6,48(%0)\n\
514 std 7,56(%0)\n\
515 std 8,64(%0)\n\
516 std 9,72(%0)\n\
517 std 10,80(%0)\n\
518 std 11,88(%0)\n\
519 std 12,96(%0)\n\
520 std 13,104(%0)\n\
521 std 14,112(%0)\n\
522 std 15,120(%0)\n\
523 std 16,128(%0)\n\
524 std 17,136(%0)\n\
525 std 18,144(%0)\n\
526 std 19,152(%0)\n\
527 std 20,160(%0)\n\
528 std 21,168(%0)\n\
529 std 22,176(%0)\n\
530 std 23,184(%0)\n\
531 std 24,192(%0)\n\
532 std 25,200(%0)\n\
533 std 26,208(%0)\n\
534 std 27,216(%0)\n\
535 std 28,224(%0)\n\
536 std 29,232(%0)\n\
537 std 30,240(%0)\n\
538 std 31,248(%0)" : : "b" (&regs));
539
540 regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
541 regs.msr = get_msr();
542 regs.ctr = get_ctr();
543 regs.xer = get_xer();
544 regs.ccr = get_cr();
545 regs.trap = 0;
546 excp = &regs;
547 }
548 return xmon_core(excp, 0);
549}
550
551int xmon_bpt(struct pt_regs *regs)
552{
553 struct bpt *bp;
554 unsigned long offset;
555
556 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
557 return 0;
558
559 /* Are we at the trap at bp->instr[1] for some bp? */
560 bp = in_breakpoint_table(regs->nip, &offset);
561 if (bp != NULL && offset == 4) {
562 regs->nip = bp->address + 4;
563 atomic_dec(&bp->ref_count);
564 return 1;
565 }
566
567 /* Are we at a breakpoint? */
568 bp = at_breakpoint(regs->nip);
569 if (!bp)
570 return 0;
571
572 xmon_core(regs, 0);
573
574 return 1;
575}
576
577int xmon_sstep(struct pt_regs *regs)
578{
579 if (user_mode(regs))
580 return 0;
581 xmon_core(regs, 0);
582 return 1;
583}
584
585int xmon_dabr_match(struct pt_regs *regs)
586{
587 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
588 return 0;
589 if (dabr.enabled == 0)
590 return 0;
591 xmon_core(regs, 0);
592 return 1;
593}
594
595int xmon_iabr_match(struct pt_regs *regs)
596{
597 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
598 return 0;
599 if (iabr == 0)
600 return 0;
601 xmon_core(regs, 0);
602 return 1;
603}
604
605int xmon_ipi(struct pt_regs *regs)
606{
607#ifdef CONFIG_SMP
608 if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
609 xmon_core(regs, 1);
610#endif
611 return 0;
612}
613
614int xmon_fault_handler(struct pt_regs *regs)
615{
616 struct bpt *bp;
617 unsigned long offset;
618
619 if (in_xmon && catch_memory_errors)
620 handle_fault(regs); /* doesn't return */
621
622 if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) {
623 bp = in_breakpoint_table(regs->nip, &offset);
624 if (bp != NULL) {
625 regs->nip = bp->address + offset;
626 atomic_dec(&bp->ref_count);
627 }
628 }
629
630 return 0;
631}
632
633static struct bpt *at_breakpoint(unsigned long pc)
634{
635 int i;
636 struct bpt *bp;
637
638 bp = bpts;
639 for (i = 0; i < NBPTS; ++i, ++bp)
640 if (bp->enabled && pc == bp->address)
641 return bp;
642 return NULL;
643}
644
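/* If nip points into bpts[] (i.e. we stopped while executing a
   breakpoint's copied instruction or the trap word after it), return
   that breakpoint and set *offp to nip's offset within its instr[]
   pair; otherwise return NULL. */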
645static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp)
646{
647 unsigned long off;
648
649 off = nip - (unsigned long) bpts;
650 if (off >= sizeof(bpts))
651 return NULL;
652 off %= sizeof(struct bpt);
653 if (off != offsetof(struct bpt, instr[0])
654 && off != offsetof(struct bpt, instr[1]))
655 return NULL;
656 *offp = off - offsetof(struct bpt, instr[0]);
657 return (struct bpt *) (nip - off);
658}
659
660static struct bpt *new_breakpoint(unsigned long a)
661{
662 struct bpt *bp;
663
664 a &= ~3UL;
665 bp = at_breakpoint(a);
666 if (bp)
667 return bp;
668
669 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
670 if (!bp->enabled && atomic_read(&bp->ref_count) == 0) {
671 bp->address = a;
672 bp->instr[1] = bpinstr;
673 store_inst(&bp->instr[1]);
674 return bp;
675 }
676 }
677
678 printf("Sorry, no free breakpoints. Please clear one first.\n");
679 return NULL;
680}
681
682static void insert_bpts(void)
683{
684 int i;
685 struct bpt *bp;
686
687 bp = bpts;
688 for (i = 0; i < NBPTS; ++i, ++bp) {
689 if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0)
690 continue;
691 if (mread(bp->address, &bp->instr[0], 4) != 4) {
692 printf("Couldn't read instruction at %lx, "
693 "disabling breakpoint there\n", bp->address);
694 bp->enabled = 0;
695 continue;
696 }
697 if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) {
698 printf("Breakpoint at %lx is on an mtmsrd or rfid "
699 "instruction, disabling it\n", bp->address);
700 bp->enabled = 0;
701 continue;
702 }
703 store_inst(&bp->instr[0]);
704 if (bp->enabled & BP_IABR)
705 continue;
706 if (mwrite(bp->address, &bpinstr, 4) != 4) {
707 printf("Couldn't write instruction at %lx, "
708 "disabling breakpoint there\n", bp->address);
709 bp->enabled &= ~BP_TRAP;
710 continue;
711 }
712 store_inst((void *)bp->address);
713 }
714}
715
716static void insert_cpu_bpts(void)
717{
718 if (dabr.enabled)
719 set_dabr(dabr.address | (dabr.enabled & 7));
720 if (iabr && cpu_has_feature(CPU_FTR_IABR))
721 set_iabr(iabr->address
722 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
723}
724
725static void remove_bpts(void)
726{
727 int i;
728 struct bpt *bp;
729 unsigned instr;
730
731 bp = bpts;
732 for (i = 0; i < NBPTS; ++i, ++bp) {
733 if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP)
734 continue;
735 if (mread(bp->address, &instr, 4) == 4
736 && instr == bpinstr
737 && mwrite(bp->address, &bp->instr, 4) != 4)
738 printf("Couldn't remove breakpoint at %lx\n",
739 bp->address);
740 else
741 store_inst((void *)bp->address);
742 }
743}
744
745static void remove_cpu_bpts(void)
746{
747 set_dabr(0);
748 if (cpu_has_feature(CPU_FTR_IABR))
749 set_iabr(0);
750}
751
752/* Command interpreting routine */
753static char *last_cmd;
754
755static int
756cmds(struct pt_regs *excp)
757{
758 int cmd = 0;
759
760 last_cmd = NULL;
761 xmon_regs = excp;
762 for(;;) {
763#ifdef CONFIG_SMP
764 printf("%x:", smp_processor_id());
765#endif /* CONFIG_SMP */
766 printf("mon> ");
767 fflush(stdout);
768 flush_input();
769 termch = 0;
770 cmd = skipbl();
771 if( cmd == '\n' ) {
772 if (last_cmd == NULL)
773 continue;
774 take_input(last_cmd);
775 last_cmd = NULL;
776 cmd = inchar();
777 }
778 switch (cmd) {
779 case 'm':
780 cmd = inchar();
781 switch (cmd) {
782 case 'm':
783 case 's':
784 case 'd':
785 memops(cmd);
786 break;
787 case 'l':
788 memlocate();
789 break;
790 case 'z':
791 memzcan();
792 break;
793 case 'i':
794 show_mem();
795 break;
796 default:
797 termch = cmd;
798 memex();
799 }
800 break;
801 case 'd':
802 dump();
803 break;
804 case 'l':
805 symbol_lookup();
806 break;
807 case 'r':
808 prregs(excp); /* print regs */
809 break;
810 case 'e':
811 excprint(excp);
812 break;
813 case 'S':
814 super_regs();
815 break;
816 case 't':
817 backtrace(excp);
818 break;
819 case 'f':
820 cacheflush();
821 break;
822 case 's':
823 if (do_step(excp))
824 return cmd;
825 break;
826 case 'x':
827 case 'X':
828 case EOF:
829 return cmd;
830 case '?':
831 printf(help_string);
832 break;
833 case 'p':
834 show_state();
835 break;
836 case 'b':
837 bpt_cmds();
838 break;
839 case 'C':
840 csum();
841 break;
842 case 'c':
843 if (cpu_cmd())
844 return 0;
845 break;
846 case 'z':
847 bootcmds();
848 break;
849 case 'T':
850 debug_trace();
851 break;
852 case 'u':
853 dump_segments();
854 break;
855 default:
856 printf("Unrecognized command: ");
857 do {
858 if (' ' < cmd && cmd <= '~')
859 putchar(cmd);
860 else
861 printf("\\x%x", cmd);
862 cmd = inchar();
863 } while (cmd != '\n');
864 printf(" (type ? for help)\n");
865 break;
866 }
867 }
868}
869
870/*
871 * Step a single instruction.
872 * Some instructions we emulate, others we execute with MSR_SE set.
873 */
874static int do_step(struct pt_regs *regs)
875{
876 unsigned int instr;
877 int stepped;
878
879 /* check we are in 64-bit kernel mode, translation enabled */
880 if ((regs->msr & (MSR_SF|MSR_PR|MSR_IR)) == (MSR_SF|MSR_IR)) {
881 if (mread(regs->nip, &instr, 4) == 4) {
882 stepped = emulate_step(regs, instr);
883 if (stepped < 0) {
884 printf("Couldn't single-step %s instruction\n",
885 (IS_RFID(instr)? "rfid": "mtmsrd"));
886 return 0;
887 }
888 if (stepped > 0) {
889 regs->trap = 0xd00 | (regs->trap & 1);
890 printf("stepped to ");
891 xmon_print_symbol(regs->nip, " ", "\n");
892 ppc_inst_dump(regs->nip, 1, 0);
893 return 0;
894 }
895 }
896 }
897 regs->msr |= MSR_SE;
898 return 1;
899}
900
901static void bootcmds(void)
902{
903 int cmd;
904
905 cmd = inchar();
906 if (cmd == 'r')
907 ppc_md.restart(NULL);
908 else if (cmd == 'h')
909 ppc_md.halt();
910 else if (cmd == 'p')
911 ppc_md.power_off();
912}
913
914static int cpu_cmd(void)
915{
916#ifdef CONFIG_SMP
917 unsigned long cpu;
918 int timeout;
919 int count;
920
921 if (!scanhex(&cpu)) {
922 /* print cpus waiting or in xmon */
923 printf("cpus stopped:");
924 count = 0;
925 for (cpu = 0; cpu < NR_CPUS; ++cpu) {
926 if (cpu_isset(cpu, cpus_in_xmon)) {
927 if (count == 0)
928 printf(" %x", cpu);
929 ++count;
930 } else {
931 if (count > 1)
932 printf("-%x", cpu - 1);
933 count = 0;
934 }
935 }
936 if (count > 1)
937 printf("-%x", NR_CPUS - 1);
938 printf("\n");
939 return 0;
940 }
941 /* try to switch to cpu specified */
942 if (!cpu_isset(cpu, cpus_in_xmon)) {
943 printf("cpu 0x%x isn't in xmon\n", cpu);
944 return 0;
945 }
946 xmon_taken = 0;
947 mb();
948 xmon_owner = cpu;
949 timeout = 10000000;
950 while (!xmon_taken) {
951 if (--timeout == 0) {
952 if (test_and_set_bit(0, &xmon_taken))
953 break;
954 /* take control back */
955 mb();
956 xmon_owner = smp_processor_id();
957 printf("cpu %u didn't take control\n", cpu);
958 return 0;
959 }
960 barrier();
961 }
962 return 1;
963#else
964 return 0;
965#endif /* CONFIG_SMP */
966}
967
968static unsigned short fcstab[256] = {
969 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
970 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
971 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
972 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
973 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
974 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
975 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
976 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
977 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
978 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
979 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
980 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
981 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
982 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
983 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
984 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
985 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
986 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
987 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
988 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
989 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
990 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
991 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
992 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
993 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
994 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
995 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
996 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
997 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
998 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
999 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
1000 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
1001};
1002
1003#define FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff])
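/*
 * Illustration (editorial, not part of the original file): fcstab[]
 * above is the byte-at-a-time lookup table for the PPP FCS-16 CRC
 * (reflected polynomial 0x8408), which csum() below applies one byte
 * at a time via the FCS() step.  A minimal sketch that regenerates
 * the table:
 */
static void gen_fcstab_sketch(unsigned short tab[256])
{
	unsigned int byte, bit;
	unsigned short v;

	for (byte = 0; byte < 256; byte++) {
		v = byte;
		for (bit = 0; bit < 8; bit++)
			v = (v & 1) ? (v >> 1) ^ 0x8408 : v >> 1;
		tab[byte] = v;		/* tab[1] == 0x1189, etc. */
	}
}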
1004
1005static void
1006csum(void)
1007{
1008 unsigned int i;
1009 unsigned short fcs;
1010 unsigned char v;
1011
1012 if (!scanhex(&adrs))
1013 return;
1014 if (!scanhex(&ncsum))
1015 return;
1016 fcs = 0xffff;
1017 for (i = 0; i < ncsum; ++i) {
1018 if (mread(adrs+i, &v, 1) == 0) {
1019 printf("csum stopped at %x\n", adrs+i);
1020 break;
1021 }
1022 fcs = FCS(fcs, v);
1023 }
1024 printf("%x\n", fcs);
1025}
1026
1027/*
1028 * Check if this is a suitable place to put a breakpoint.
1029 */
1030static long check_bp_loc(unsigned long addr)
1031{
1032 unsigned int instr;
1033
1034 addr &= ~3;
1035 if (addr < KERNELBASE) {
1036 printf("Breakpoints may only be placed at kernel addresses\n");
1037 return 0;
1038 }
1039 if (!mread(addr, &instr, sizeof(instr))) {
1040 printf("Can't read instruction at address %lx\n", addr);
1041 return 0;
1042 }
1043 if (IS_MTMSRD(instr) || IS_RFID(instr)) {
1044 printf("Breakpoints may not be placed on mtmsrd or rfid "
1045 "instructions\n");
1046 return 0;
1047 }
1048 return 1;
1049}
1050
1051static char *breakpoint_help_string =
1052 "Breakpoint command usage:\n"
1053 "b show breakpoints\n"
1054 "b <addr> [cnt] set breakpoint at given instr addr\n"
1055 "bc clear all breakpoints\n"
1056 "bc <n/addr> clear breakpoint number n or at addr\n"
1057 "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n"
1058 "bd <addr> [cnt] set hardware data breakpoint\n"
1059 "";
1060
1061static void
1062bpt_cmds(void)
1063{
1064 int cmd;
1065 unsigned long a;
1066 int mode, i;
1067 struct bpt *bp;
1068 const char badaddr[] = "Only kernel addresses are permitted "
1069 "for breakpoints\n";
1070
1071 cmd = inchar();
1072 switch (cmd) {
1073 case 'd': /* bd - hardware data breakpoint */
1074 mode = 7;
1075 cmd = inchar();
1076 if (cmd == 'r')
1077 mode = 5;
1078 else if (cmd == 'w')
1079 mode = 6;
1080 else
1081 termch = cmd;
1082 dabr.address = 0;
1083 dabr.enabled = 0;
1084 if (scanhex(&dabr.address)) {
1085 if (dabr.address < KERNELBASE) {
1086 printf(badaddr);
1087 break;
1088 }
1089 dabr.address &= ~7;
1090 dabr.enabled = mode | BP_DABR;
1091 }
1092 break;
1093
1094 case 'i': /* bi - hardware instr breakpoint */
1095 if (!cpu_has_feature(CPU_FTR_IABR)) {
1096 printf("Hardware instruction breakpoint "
1097 "not supported on this cpu\n");
1098 break;
1099 }
1100 if (iabr) {
1101 iabr->enabled &= ~(BP_IABR | BP_IABR_TE);
1102 iabr = NULL;
1103 }
1104 if (!scanhex(&a))
1105 break;
1106 if (!check_bp_loc(a))
1107 break;
1108 bp = new_breakpoint(a);
1109 if (bp != NULL) {
1110 bp->enabled |= BP_IABR | BP_IABR_TE;
1111 iabr = bp;
1112 }
1113 break;
1114
1115 case 'c':
1116 if (!scanhex(&a)) {
1117 /* clear all breakpoints */
1118 for (i = 0; i < NBPTS; ++i)
1119 bpts[i].enabled = 0;
1120 iabr = NULL;
1121 dabr.enabled = 0;
1122 printf("All breakpoints cleared\n");
1123 break;
1124 }
1125
1126 if (a <= NBPTS && a >= 1) {
1127 /* assume a breakpoint number */
1128 bp = &bpts[a-1]; /* bp nums are 1 based */
1129 } else {
1130 /* assume a breakpoint address */
1131 bp = at_breakpoint(a);
1132 if (bp == 0) {
1133 printf("No breakpoint at %x\n", a);
1134 break;
1135 }
1136 }
1137
1138 printf("Cleared breakpoint %x (", BP_NUM(bp));
1139 xmon_print_symbol(bp->address, " ", ")\n");
1140 bp->enabled = 0;
1141 break;
1142
1143 default:
1144 termch = cmd;
1145 cmd = skipbl();
1146 if (cmd == '?') {
1147 printf(breakpoint_help_string);
1148 break;
1149 }
1150 termch = cmd;
1151 if (!scanhex(&a)) {
1152 /* print all breakpoints */
1153 printf(" type address\n");
1154 if (dabr.enabled) {
1155 printf(" data %.16lx [", dabr.address);
1156 if (dabr.enabled & 1)
1157 printf("r");
1158 if (dabr.enabled & 2)
1159 printf("w");
1160 printf("]\n");
1161 }
1162 for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
1163 if (!bp->enabled)
1164 continue;
1165 printf("%2x %s ", BP_NUM(bp),
1166 (bp->enabled & BP_IABR)? "inst": "trap");
1167 xmon_print_symbol(bp->address, " ", "\n");
1168 }
1169 break;
1170 }
1171
1172 if (!check_bp_loc(a))
1173 break;
1174 bp = new_breakpoint(a);
1175 if (bp != NULL)
1176 bp->enabled |= BP_TRAP;
1177 break;
1178 }
1179}
1180
1181/* Very cheap human name for vector lookup. */
1182static
1183const char *getvecname(unsigned long vec)
1184{
1185 char *ret;
1186
1187 switch (vec) {
1188 case 0x100: ret = "(System Reset)"; break;
1189 case 0x200: ret = "(Machine Check)"; break;
1190 case 0x300: ret = "(Data Access)"; break;
1191 case 0x380: ret = "(Data SLB Access)"; break;
1192 case 0x400: ret = "(Instruction Access)"; break;
1193 case 0x480: ret = "(Instruction SLB Access)"; break;
1194 case 0x500: ret = "(Hardware Interrupt)"; break;
1195 case 0x600: ret = "(Alignment)"; break;
1196 case 0x700: ret = "(Program Check)"; break;
1197 case 0x800: ret = "(FPU Unavailable)"; break;
1198 case 0x900: ret = "(Decrementer)"; break;
1199 case 0xc00: ret = "(System Call)"; break;
1200 case 0xd00: ret = "(Single Step)"; break;
1201 case 0xf00: ret = "(Performance Monitor)"; break;
1202 case 0xf20: ret = "(Altivec Unavailable)"; break;
1203 case 0x1300: ret = "(Instruction Breakpoint)"; break;
1204 default: ret = "";
1205 }
1206 return ret;
1207}
1208
1209static void get_function_bounds(unsigned long pc, unsigned long *startp,
1210 unsigned long *endp)
1211{
1212 unsigned long size, offset;
1213 const char *name;
1214 char *modname;
1215
1216 *startp = *endp = 0;
1217 if (pc == 0)
1218 return;
1219 if (setjmp(bus_error_jmp) == 0) {
1220 catch_memory_errors = 1;
1221 sync();
1222 name = kallsyms_lookup(pc, &size, &offset, &modname, tmpstr);
1223 if (name != NULL) {
1224 *startp = pc - offset;
1225 *endp = pc - offset + size;
1226 }
1227 sync();
1228 }
1229 catch_memory_errors = 0;
1230}
1231
1232static int xmon_depth_to_print = 64;
1233
1234static void xmon_show_stack(unsigned long sp, unsigned long lr,
1235 unsigned long pc)
1236{
1237 unsigned long ip;
1238 unsigned long newsp;
1239 unsigned long marker;
1240 int count = 0;
1241 struct pt_regs regs;
1242
1243 do {
1244 if (sp < PAGE_OFFSET) {
1245 if (sp != 0)
1246 printf("SP (%lx) is in userspace\n", sp);
1247 break;
1248 }
1249
1250 if (!mread(sp + 16, &ip, sizeof(unsigned long))
1251 || !mread(sp, &newsp, sizeof(unsigned long))) {
1252 printf("Couldn't read stack frame at %lx\n", sp);
1253 break;
1254 }
1255
1256 /*
1257 * For the first stack frame, try to work out if
1258 * LR and/or the saved LR value in the bottommost
1259 * stack frame are valid.
1260 */
1261 if ((pc | lr) != 0) {
1262 unsigned long fnstart, fnend;
1263 unsigned long nextip;
1264 int printip = 1;
1265
1266 get_function_bounds(pc, &fnstart, &fnend);
1267 nextip = 0;
1268 if (newsp > sp)
1269 mread(newsp + 16, &nextip,
1270 sizeof(unsigned long));
1271 if (lr == ip) {
1272 if (lr < PAGE_OFFSET
1273 || (fnstart <= lr && lr < fnend))
1274 printip = 0;
1275 } else if (lr == nextip) {
1276 printip = 0;
1277 } else if (lr >= PAGE_OFFSET
1278 && !(fnstart <= lr && lr < fnend)) {
1279 printf("[link register ] ");
1280 xmon_print_symbol(lr, " ", "\n");
1281 }
1282 if (printip) {
1283 printf("[%.16lx] ", sp);
1284 xmon_print_symbol(ip, " ", " (unreliable)\n");
1285 }
1286 pc = lr = 0;
1287
1288 } else {
1289 printf("[%.16lx] ", sp);
1290 xmon_print_symbol(ip, " ", "\n");
1291 }
1292
1293 /* Look for "regshere" marker to see if this is
1294 an exception frame. */
		if (mread(sp + 0x60, &marker, sizeof(unsigned long))
		    && marker == 0x7265677368657265ul) {
			if (mread(sp + 0x70, &regs, sizeof(regs))
			    != sizeof(regs)) {
				printf("Couldn't read registers at %lx\n",
				       sp + 0x70);
				break;
			}
			printf("--- Exception: %lx %s at ", regs.trap,
			       getvecname(TRAP(&regs)));
			pc = regs.nip;
			lr = regs.link;
			xmon_print_symbol(pc, " ", "\n");
		}

		if (newsp == 0)
			break;

		sp = newsp;
	} while (count++ < xmon_depth_to_print);
}

static void backtrace(struct pt_regs *excp)
{
	unsigned long sp;

	if (scanhex(&sp))
		xmon_show_stack(sp, 0, 0);
	else
		xmon_show_stack(excp->gpr[1], excp->link, excp->nip);
	scannl();
}

static void print_bug_trap(struct pt_regs *regs)
{
	struct bug_entry *bug;
	unsigned long addr;

	if (regs->msr & MSR_PR)
		return;		/* not in kernel */
	addr = regs->nip;	/* address of trap instruction */
	if (addr < PAGE_OFFSET)
		return;
	bug = find_bug(regs->nip);
	if (bug == NULL)
		return;
	if (bug->line & BUG_WARNING_TRAP)
		return;

	printf("kernel BUG in %s at %s:%d!\n",
	       bug->function, bug->file, (unsigned int)bug->line);
}

void excprint(struct pt_regs *fp)
{
	unsigned long trap;

#ifdef CONFIG_SMP
	printf("cpu 0x%x: ", smp_processor_id());
#endif /* CONFIG_SMP */

	trap = TRAP(fp);
	printf("Vector: %lx %s at [%lx]\n", fp->trap, getvecname(trap), fp);
	printf("    pc: ");
	xmon_print_symbol(fp->nip, ": ", "\n");

	printf("    lr: ");
	xmon_print_symbol(fp->link, ": ", "\n");

	printf("    sp: %lx\n", fp->gpr[1]);
	printf("   msr: %lx\n", fp->msr);

	if (trap == 0x300 || trap == 0x380 || trap == 0x600) {
		printf("   dar: %lx\n", fp->dar);
		if (trap != 0x380)
			printf(" dsisr: %lx\n", fp->dsisr);
	}

	printf("  current = 0x%lx\n", current);
	printf("  paca    = 0x%lx\n", get_paca());
	if (current) {
		printf("    pid   = %ld, comm = %s\n",
		       current->pid, current->comm);
	}

	if (trap == 0x700)
		print_bug_trap(fp);
}

void prregs(struct pt_regs *fp)
{
	int n;
	unsigned long base;
	struct pt_regs regs;

	if (scanhex(&base)) {
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			regs = *(struct pt_regs *)base;
			sync();
			__delay(200);
		} else {
			catch_memory_errors = 0;
			printf("*** Error reading registers from %.16lx\n",
			       base);
			return;
		}
		catch_memory_errors = 0;
		fp = &regs;
	}

	if (FULL_REGS(fp)) {
		for (n = 0; n < 16; ++n)
			printf("R%.2d = %.16lx   R%.2d = %.16lx\n",
			       n, fp->gpr[n], n+16, fp->gpr[n+16]);
	} else {
		for (n = 0; n < 7; ++n)
			printf("R%.2d = %.16lx   R%.2d = %.16lx\n",
			       n, fp->gpr[n], n+7, fp->gpr[n+7]);
	}
	printf("pc  = ");
	xmon_print_symbol(fp->nip, " ", "\n");
	printf("lr  = ");
	xmon_print_symbol(fp->link, " ", "\n");
	printf("msr = %.16lx   cr  = %.8lx\n", fp->msr, fp->ccr);
	printf("ctr = %.16lx   xer = %.16lx   trap = %8lx\n",
	       fp->ctr, fp->xer, fp->trap);
}

void cacheflush(void)
{
	int cmd;
	unsigned long nflush;

	cmd = inchar();
	if (cmd != 'i')
		termch = cmd;
	scanhex((void *)&adrs);
	if (termch != '\n')
		termch = 0;
	nflush = 1;
	scanhex(&nflush);
	nflush = (nflush + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();

		if (cmd != 'i') {
			for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
				cflush((void *) adrs);
		} else {
			for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
				cinval((void *) adrs);
		}
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}
	catch_memory_errors = 0;
}

unsigned long
read_spr(int n)
{
	unsigned int instrs[2];
	unsigned long (*code)(void);
	unsigned long opd[3];
	unsigned long ret = -1UL;

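	/*
	 * Build a tiny function on the fly: "mfspr r3,n; blr".
	 * 0x7c6002a6 is mfspr r3,0; the 10-bit SPR number is encoded
	 * with its two 5-bit halves swapped, hence the two shifts
	 * below.  0x4e800020 is blr.  opd[] is a ppc64 ELF function
	 * descriptor {entry, TOC, environment}; TOC and env can stay 0
	 * because the generated code uses neither.
	 */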
	instrs[0] = 0x7c6002a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
	instrs[1] = 0x4e800020;
	opd[0] = (unsigned long)instrs;
	opd[1] = 0;
	opd[2] = 0;
	store_inst(instrs);
	store_inst(instrs+1);
	code = (unsigned long (*)(void)) opd;

	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();

		ret = code();

		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}
	catch_memory_errors = 0;

	return ret;
}

void
write_spr(int n, unsigned long val)
{
	unsigned int instrs[2];
	unsigned long (*code)(unsigned long);
	unsigned long opd[3];

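	/*
	 * Same trick as read_spr() above, but with opcode 0x7c6003a6,
	 * i.e. "mtspr n,r3; blr", so the first argument (passed in r3)
	 * is written to the SPR.
	 */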
	instrs[0] = 0x7c6003a6 + ((n & 0x1F) << 16) + ((n & 0x3e0) << 6);
	instrs[1] = 0x4e800020;
	opd[0] = (unsigned long)instrs;
	opd[1] = 0;
	opd[2] = 0;
	store_inst(instrs);
	store_inst(instrs+1);
	code = (unsigned long (*)(unsigned long)) opd;

	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();

		code(val);

		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}
	catch_memory_errors = 0;
}

static unsigned long regno;
extern char exc_prolog;
extern char dec_exc;

void
super_regs(void)
{
	int cmd;
	unsigned long val;
#ifdef CONFIG_PPC_ISERIES
	struct paca_struct *ptrPaca = NULL;
	struct lppaca *ptrLpPaca = NULL;
	struct ItLpRegSave *ptrLpRegSave = NULL;
#endif

	cmd = skipbl();
	if (cmd == '\n') {
		unsigned long sp, toc;
		asm("mr %0,1" : "=r" (sp) :);
		asm("mr %0,2" : "=r" (toc) :);

		printf("msr  = %.16lx  sprg0= %.16lx\n", get_msr(), get_sprg0());
		printf("pvr  = %.16lx  sprg1= %.16lx\n", get_pvr(), get_sprg1());
		printf("dec  = %.16lx  sprg2= %.16lx\n", get_dec(), get_sprg2());
		printf("sp   = %.16lx  sprg3= %.16lx\n", sp, get_sprg3());
		printf("toc  = %.16lx  dar  = %.16lx\n", toc, get_dar());
		printf("srr0 = %.16lx  srr1 = %.16lx\n", get_srr0(), get_srr1());
#ifdef CONFIG_PPC_ISERIES
		/* Dump out relevant Paca data areas. */
		printf("Paca: \n");
		ptrPaca = get_paca();

		printf("  Local Processor Control Area (LpPaca): \n");
		ptrLpPaca = ptrPaca->lppaca_ptr;
		printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
		       ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
		printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
		       ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
		printf("    Saved Gpr5=%.16lx \n", ptrLpPaca->saved_gpr5);

		printf("  Local Processor Register Save Area (LpRegSave): \n");
		ptrLpRegSave = ptrPaca->reg_save_ptr;
		printf("    Saved Sprg0=%.16lx  Saved Sprg1=%.16lx \n",
		       ptrLpRegSave->xSPRG0, ptrLpRegSave->xSPRG1);
		printf("    Saved Sprg2=%.16lx  Saved Sprg3=%.16lx \n",
		       ptrLpRegSave->xSPRG2, ptrLpRegSave->xSPRG3);
		printf("    Saved Msr  =%.16lx  Saved Nia  =%.16lx \n",
		       ptrLpRegSave->xMSR, ptrLpRegSave->xNIA);
#endif

		return;
	}

	scanhex(&regno);
	switch (cmd) {
	case 'w':
		val = read_spr(regno);
		scanhex(&val);
		write_spr(regno, val);
		/* fall through */
	case 'r':
		printf("spr %lx = %lx\n", regno, read_spr(regno));
		break;
	case 'm':
		val = get_msr();
		scanhex(&val);
		set_msrd(val);
		break;
	}
	scannl();
}

/*
 * Stuff for reading and writing memory safely
 */
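/*
 * mread()/mwrite() access arbitrary addresses under setjmp/longjmp
 * protection: catch_memory_errors tells the xmon fault handler
 * (handle_fault() below) to longjmp back to bus_error_jmp instead of
 * letting the kernel oops.  The __delay(200) gives a slow, possibly
 * asynchronous machine check time to arrive while we can still
 * recover.  Both return the number of bytes actually transferred,
 * so 0 means the access faulted.
 */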
int
mread(unsigned long adrs, void *buf, int size)
{
	volatile int n;
	char *p, *q;

	n = 0;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();
		p = (char *)adrs;
		q = (char *)buf;
		switch (size) {
		case 2:
			*(short *)q = *(short *)p;
			break;
		case 4:
			*(int *)q = *(int *)p;
			break;
		case 8:
			*(long *)q = *(long *)p;
			break;
		default:
			for( ; n < size; ++n) {
				*q++ = *p++;
				sync();
			}
		}
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
		n = size;
	}
	catch_memory_errors = 0;
	return n;
}

int
mwrite(unsigned long adrs, void *buf, int size)
{
	volatile int n;
	char *p, *q;

	n = 0;
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();
		p = (char *) adrs;
		q = (char *) buf;
		switch (size) {
		case 2:
			*(short *)p = *(short *)q;
			break;
		case 4:
			*(int *)p = *(int *)q;
			break;
		case 8:
			*(long *)p = *(long *)q;
			break;
		default:
			for ( ; n < size; ++n) {
				*p++ = *q++;
				sync();
			}
		}
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
		n = size;
	} else {
		printf("*** Error writing address %lx\n", adrs + n);
	}
	catch_memory_errors = 0;
	return n;
}

static int fault_type;
static char *fault_chars[] = { "--", "**", "##" };

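/*
 * Classify the fault so the dump routines can show what went wrong:
 * fault_type indexes fault_chars[] above, i.e. "--" for a machine
 * check (0x200), "**" for a data access or data SLB fault
 * (0x300/0x380), and "##" for anything else.
 */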
static int
handle_fault(struct pt_regs *regs)
{
	switch (TRAP(regs)) {
	case 0x200:
		fault_type = 0;
		break;
	case 0x300:
	case 0x380:
		fault_type = 1;
		break;
	default:
		fault_type = 2;
	}

	longjmp(bus_error_jmp, 1);

	return 0;
}

#define SWAP(a, b, t)	((t) = (a), (a) = (b), (b) = (t))

void
byterev(unsigned char *val, int size)
{
	int t;

	switch (size) {
	case 2:
		SWAP(val[0], val[1], t);
		break;
	case 4:
		SWAP(val[0], val[3], t);
		SWAP(val[1], val[2], t);
		break;
	case 8: /* is there really any use for this? */
		SWAP(val[0], val[7], t);
		SWAP(val[1], val[6], t);
		SWAP(val[2], val[5], t);
		SWAP(val[3], val[4], t);
		break;
	}
}

static int brev;
static int mnoread;

static char *memex_help_string =
    "Memory examine command usage:\n"
    "m [addr] [flags] examine/change memory\n"
    "  addr is optional.  will start where left off.\n"
    "  flags may include chars from this set:\n"
    "    b   modify by bytes (default)\n"
    "    w   modify by words (2 byte)\n"
    "    l   modify by longs (4 byte)\n"
    "    d   modify by doubleword (8 byte)\n"
    "    r   toggle reverse byte order mode\n"
    "    n   do not read memory (for i/o spaces)\n"
    "    .   ok to read (default)\n"
    "NOTE: flags are saved as defaults\n"
    "";

static char *memex_subcmd_help_string =
    "Memory examine subcommands:\n"
    "  hexval   write this val to current location\n"
    "  'string' write chars from string to this location\n"
    "  ,        increment address\n"
    "  ^        decrement address\n"
    "  /        increment addr by 0x10.  //=0x100, ///=0x1000, etc\n"
    "  \\        decrement addr by 0x10.  \\\\=0x100, \\\\\\=0x1000, etc\n"
    "  .        clear no-read flag\n"
    "  ;        stay at this addr\n"
    "  v        change to byte mode\n"
    "  w        change to word (2 byte) mode\n"
    "  l        change to long (4 byte) mode\n"
    "  u        change to doubleword (8 byte) mode\n"
    "  m addr   change current addr\n"
    "  n        toggle no-read flag\n"
    "  r        toggle byte reverse flag\n"
    "  < count  back up count bytes\n"
    "  > count  skip forward count bytes\n"
    "  x        exit this mode\n"
    "";

void
memex(void)
{
	int cmd, inc, i, nslash;
	unsigned long n;
	unsigned char val[16];

	scanhex((void *)&adrs);
	cmd = skipbl();
	if (cmd == '?') {
		printf(memex_help_string);
		return;
	} else {
		termch = cmd;
	}
	last_cmd = "m\n";
	while ((cmd = skipbl()) != '\n') {
		switch( cmd ){
		case 'b':	size = 1;	break;
		case 'w':	size = 2;	break;
		case 'l':	size = 4;	break;
		case 'd':	size = 8;	break;
		case 'r':	brev = !brev;	break;
		case 'n':	mnoread = 1;	break;
		case '.':	mnoread = 0;	break;
		}
	}
	if( size <= 0 )
		size = 1;
	else if( size > 8 )
		size = 8;
	for(;;){
		if (!mnoread)
			n = mread(adrs, val, size);
		printf("%.16lx%c", adrs, brev? 'r': ' ');
		if (!mnoread) {
			if (brev)
				byterev(val, size);
			putchar(' ');
			for (i = 0; i < n; ++i)
				printf("%.2x", val[i]);
			for (; i < size; ++i)
				printf("%s", fault_chars[fault_type]);
		}
		putchar(' ');
		inc = size;
		nslash = 0;
		for(;;){
			if( scanhex(&n) ){
				for (i = 0; i < size; ++i)
					val[i] = n >> (i * 8);
				if (!brev)
					byterev(val, size);
				mwrite(adrs, val, size);
				inc = size;
			}
			cmd = skipbl();
			if (cmd == '\n')
				break;
			inc = 0;
			switch (cmd) {
			case '\'':
				for(;;){
					n = inchar();
					if( n == '\\' )
						n = bsesc();
					else if( n == '\'' )
						break;
					for (i = 0; i < size; ++i)
						val[i] = n >> (i * 8);
					if (!brev)
						byterev(val, size);
					mwrite(adrs, val, size);
					adrs += size;
				}
				adrs -= size;
				inc = size;
				break;
			case ',':
				adrs += size;
				break;
			case '.':
				mnoread = 0;
				break;
			case ';':
				break;
			case 'x':
			case EOF:
				scannl();
				return;
			case 'b':
			case 'v':
				size = 1;
				break;
			case 'w':
				size = 2;
				break;
			case 'l':
				size = 4;
				break;
			case 'u':
				size = 8;
				break;
			case '^':
				adrs -= size;
				break;
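			/*
			 * Each consecutive '/' (or '\\') multiplies the
			 * step by another 16: '/' moves 0x10, '//' 0x100,
			 * '///' 0x1000, and so on; the previous step is
			 * undone first so only the final stride applies.
			 */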
			case '/':
				if (nslash > 0)
					adrs -= 1 << nslash;
				else
					nslash = 0;
				nslash += 4;
				adrs += 1 << nslash;
				break;
			case '\\':
				if (nslash < 0)
					adrs += 1 << -nslash;
				else
					nslash = 0;
				nslash -= 4;
				adrs -= 1 << -nslash;
				break;
			case 'm':
				scanhex((void *)&adrs);
				break;
			case 'n':
				mnoread = 1;
				break;
			case 'r':
				brev = !brev;
				break;
			case '<':
				n = size;
				scanhex(&n);
				adrs -= n;
				break;
			case '>':
				n = size;
				scanhex(&n);
				adrs += n;
				break;
			case '?':
				printf(memex_subcmd_help_string);
				break;
			}
		}
		adrs += inc;
	}
}

int
bsesc(void)
{
	int c;

	c = inchar();
	switch( c ){
	case 'n':	c = '\n';	break;
	case 'r':	c = '\r';	break;
	case 'b':	c = '\b';	break;
	case 't':	c = '\t';	break;
	}
	return c;
}

#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))
void
dump(void)
{
	int c;

	c = inchar();
	if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n')
		termch = c;
	scanhex((void *)&adrs);
	if (termch != '\n')
		termch = 0;
	if (c == 'i') {
		scanhex(&nidump);
		if (nidump == 0)
			nidump = 16;
		else if (nidump > MAX_DUMP)
			nidump = MAX_DUMP;
		adrs += ppc_inst_dump(adrs, nidump, 1);
		last_cmd = "di\n";
	} else {
		scanhex(&ndump);
		if (ndump == 0)
			ndump = 64;
		else if (ndump > MAX_DUMP)
			ndump = MAX_DUMP;
		prdump(adrs, ndump);
		adrs += ndump;
		last_cmd = "d\n";
	}
}

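/*
 * Hex + ASCII dump, 16 bytes per line: the address, the bytes in hex
 * (with a gap after the first 8), then the printable characters
 * between '|' markers.  Bytes that could not be read are shown with
 * the fault_chars marker for the fault that occurred.
 */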
void
prdump(unsigned long adrs, long ndump)
{
	long n, m, c, r, nr;
	unsigned char temp[16];

	for (n = ndump; n > 0;) {
		printf("%.16lx", adrs);
		putchar(' ');
		r = n < 16? n: 16;
		nr = mread(adrs, temp, r);
		adrs += nr;
		for (m = 0; m < r; ++m) {
			if ((m & 7) == 0 && m > 0)
				putchar(' ');
			if (m < nr)
				printf("%.2x", temp[m]);
			else
				printf("%s", fault_chars[fault_type]);
		}
		if (m <= 8)
			printf(" ");
		for (; m < 16; ++m)
			printf("  ");
		printf("  |");
		for (m = 0; m < r; ++m) {
			if (m < nr) {
				c = temp[m];
				putchar(' ' <= c && c <= '~'? c: '.');
			} else
				putchar(' ');
		}
		n -= r;
		for (; m < 16; ++m)
			putchar(' ');
		printf("|\n");
		if (nr < r)
			break;
	}
}

int
ppc_inst_dump(unsigned long adr, long count, int praddr)
{
	int nr, dotted;
	unsigned long first_adr;
	unsigned long inst, last_inst = 0;
	unsigned char val[4];

	dotted = 0;
	for (first_adr = adr; count > 0; --count, adr += 4) {
		nr = mread(adr, val, 4);
		if (nr == 0) {
			if (praddr) {
				const char *x = fault_chars[fault_type];
				printf("%.16lx  %s%s%s%s\n", adr, x, x, x, x);
			}
			break;
		}
		inst = GETWORD(val);
		if (adr > first_adr && inst == last_inst) {
			if (!dotted) {
				printf(" ...\n");
				dotted = 1;
			}
			continue;
		}
		dotted = 0;
		last_inst = inst;
		if (praddr)
			printf("%.16lx  %.8x", adr, inst);
		printf("\t");
		print_insn_powerpc(inst, adr, 0);	/* always returns 4 */
		printf("\n");
	}
	return adr - first_adr;
}

void
print_address(unsigned long addr)
{
	xmon_print_symbol(addr, "\t# ", "");
}


/*
 * Memory operations - move, set, print differences
 */
static unsigned long mdest;		/* destination address */
static unsigned long msrc;		/* source address */
static unsigned long mval;		/* byte value to set memory to */
static unsigned long mcount;		/* # bytes to affect */
static unsigned long mdiffs;		/* max # differences to print */

void
memops(int cmd)
{
	scanhex((void *)&mdest);
	if( termch != '\n' )
		termch = 0;
	scanhex((void *)(cmd == 's'? &mval: &msrc));
	if( termch != '\n' )
		termch = 0;
	scanhex((void *)&mcount);
	switch( cmd ){
	case 'm':
		memmove((void *)mdest, (void *)msrc, mcount);
		break;
	case 's':
		memset((void *)mdest, mval, mcount);
		break;
	case 'd':
		if( termch != '\n' )
			termch = 0;
		scanhex((void *)&mdiffs);
		memdiffs((unsigned char *)mdest, (unsigned char *)msrc,
			 mcount, mdiffs);
		break;
	}
}

void
memdiffs(unsigned char *p1, unsigned char *p2, unsigned nb, unsigned maxpr)
{
	unsigned n, prt;

	prt = 0;
	for( n = nb; n > 0; --n )
		if( *p1++ != *p2++ )
			if( ++prt <= maxpr )
				printf("%.16lx %.2x # %.16lx %.2x\n",
				       (unsigned long)(p1 - 1), p1[-1],
				       (unsigned long)(p2 - 1), p2[-1]);
	if( prt > maxpr )
		printf("Total of %d differences\n", prt);
}

static unsigned long mend;
static unsigned long mask;

void
memlocate(void)
{
	unsigned long a;
	unsigned n;
	unsigned char val[4];

	last_cmd = "ml";
	scanhex((void *)&mdest);
	if (termch != '\n') {
		termch = 0;
		scanhex((void *)&mend);
		if (termch != '\n') {
			termch = 0;
			scanhex((void *)&mval);
			mask = ~0;
			if (termch != '\n')
				termch = 0;
			scanhex((void *)&mask);
		}
	}
	n = 0;
	for (a = mdest; a < mend; a += 4) {
		if (mread(a, val, 4) == 4
		    && ((GETWORD(val) ^ mval) & mask) == 0) {
			printf("%.16lx: %.8x\n", a, GETWORD(val));
			if (++n >= 10)
				break;
		}
	}
}

static unsigned long mskip = 0x1000;
static unsigned long mlim = 0xffffffff;

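/*
 * Probe memory in mskip-sized steps and report which ranges are
 * readable: "start .." is printed when a probe first succeeds, and
 * the end address when a subsequent probe faults.
 */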
void
memzcan(void)
{
	unsigned char v;
	unsigned long a;
	int ok, ook;

	scanhex(&mdest);
	if (termch != '\n')
		termch = 0;
	scanhex(&mskip);
	if (termch != '\n')
		termch = 0;
	scanhex(&mlim);
	ook = 0;
	for (a = mdest; a < mlim; a += mskip) {
		ok = mread(a, &v, 1);
		if (ok && !ook) {
			printf("%.8lx .. ", a);
			fflush(stdout);
		} else if (!ok && ook)
			printf("%.8lx\n", a - mskip);
		ook = ok;
		if (a + mskip < a)
			break;
	}
	if (ook)
		printf("%.8lx\n", a - mskip);
}

/* Input scanning routines */
int
skipbl(void)
{
	int c;

	if( termch != 0 ){
		c = termch;
		termch = 0;
	} else
		c = inchar();
	while( c == ' ' || c == '\t' )
		c = inchar();
	return c;
}

#define N_PTREGS	44
static char *regnames[N_PTREGS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
	"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
	"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
	"pc", "msr", "or3", "ctr", "lr", "xer", "ccr", "softe",
	"trap", "dar", "dsisr", "res"
};

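/*
 * Parse a hex value.  Accepts, for example (values illustrative):
 *	%r1		the named register from the current pt_regs
 *	$schedule	a kallsyms symbol name, resolved to its address
 *	0xc0000000	hex with an explicit 0x prefix
 *	c0000000	bare hex digits
 * Returns 1 and stores the value through vp on success, 0 otherwise.
 */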
int
scanhex(unsigned long *vp)
{
	int c, d;
	unsigned long v;

	c = skipbl();
	if (c == '%') {
		/* parse register name */
		char regname[8];
		int i;

		for (i = 0; i < sizeof(regname) - 1; ++i) {
			c = inchar();
			if (!isalnum(c)) {
				termch = c;
				break;
			}
			regname[i] = c;
		}
		regname[i] = 0;
		for (i = 0; i < N_PTREGS; ++i) {
			if (strcmp(regnames[i], regname) == 0) {
				if (xmon_regs == NULL) {
					printf("regs not available\n");
					return 0;
				}
				*vp = ((unsigned long *)xmon_regs)[i];
				return 1;
			}
		}
		printf("invalid register name '%%%s'\n", regname);
		return 0;
	}

	/* skip leading "0x" if any */

	if (c == '0') {
		c = inchar();
		if (c == 'x') {
			c = inchar();
		} else {
			d = hexdigit(c);
			if (d == EOF) {
				termch = c;
				*vp = 0;
				return 1;
			}
		}
	} else if (c == '$') {
		int i;
		for (i = 0; i < 63; i++) {
			c = inchar();
			if (isspace(c)) {
				termch = c;
				break;
			}
			tmpstr[i] = c;
		}
		tmpstr[i++] = 0;
		*vp = 0;
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			*vp = kallsyms_lookup_name(tmpstr);
			sync();
		}
		catch_memory_errors = 0;
		if (!(*vp)) {
			printf("unknown symbol '%s'\n", tmpstr);
			return 0;
		}
		return 1;
	}

	d = hexdigit(c);
	if (d == EOF) {
		termch = c;
		return 0;
	}
	v = 0;
	do {
		v = (v << 4) + d;
		c = inchar();
		d = hexdigit(c);
	} while (d != EOF);
	termch = c;
	*vp = v;
	return 1;
}

void
scannl(void)
{
	int c;

	c = termch;
	termch = 0;
	while( c != '\n' )
		c = inchar();
}

int
hexdigit(int c)
{
	if( '0' <= c && c <= '9' )
		return c - '0';
	if( 'A' <= c && c <= 'F' )
		return c - ('A' - 10);
	if( 'a' <= c && c <= 'f' )
		return c - ('a' - 10);
	return EOF;
}

void
getstring(char *s, int size)
{
	int c;

	c = skipbl();
	do {
		if( size > 1 ){
			*s++ = c;
			--size;
		}
		c = inchar();
	} while( c != ' ' && c != '\t' && c != '\n' );
	termch = c;
	*s = 0;
}

static char line[256];
static char *lineptr;

void
flush_input(void)
{
	lineptr = NULL;
}

int
inchar(void)
{
	if (lineptr == NULL || *lineptr == 0) {
		if (fgets(line, sizeof(line), stdin) == NULL) {
			lineptr = NULL;
			return EOF;
		}
		lineptr = line;
	}
	return *lineptr++;
}

void
take_input(char *str)
{
	lineptr = str;
}


static void
symbol_lookup(void)
{
	int type = inchar();
	unsigned long addr;
	static char tmp[64];

	switch (type) {
	case 'a':
		if (scanhex(&addr))
			xmon_print_symbol(addr, ": ", "\n");
		termch = 0;
		break;
	case 's':
		getstring(tmp, 64);
		if (setjmp(bus_error_jmp) == 0) {
			catch_memory_errors = 1;
			sync();
			addr = kallsyms_lookup_name(tmp);
			if (addr)
				printf("%s: %lx\n", tmp, addr);
			else
				printf("Symbol '%s' not found.\n", tmp);
			sync();
		}
		catch_memory_errors = 0;
		termch = 0;
		break;
	}
}


/* Print an address in numeric and symbolic form (if possible) */
static void xmon_print_symbol(unsigned long address, const char *mid,
			      const char *after)
{
	char *modname;
	const char *name = NULL;
	unsigned long offset, size;

	printf("%.16lx", address);
	if (setjmp(bus_error_jmp) == 0) {
		catch_memory_errors = 1;
		sync();
		name = kallsyms_lookup(address, &size, &offset, &modname,
				       tmpstr);
		sync();
		/* wait a little while to see if we get a machine check */
		__delay(200);
	}

	catch_memory_errors = 0;

	if (name) {
		printf("%s%s+%#lx/%#lx", mid, name, offset, size);
		if (modname)
			printf(" [%s]", modname);
	}
	printf("%s", after);
}

static void debug_trace(void)
{
	unsigned long val, cmd, on;

	cmd = skipbl();
	if (cmd == '\n') {
		/* show current state */
		unsigned long i;
		printf("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
		for (i = 0; i < PPCDBG_NUM_FLAGS; i++) {
			on = PPCDBG_BITVAL(i) & ppc64_debug_switch;
			printf("%02lx %s %12s ", i, on ? "on " : "off",
			       trace_names[i] ? trace_names[i] : "");
			if (((i+1) % 3) == 0)
				printf("\n");
		}
		printf("\n");
		return;
	}
	while (cmd != '\n') {
		on = 1;		/* default if no sign given */
		while (cmd == '+' || cmd == '-') {
			on = (cmd == '+');
			cmd = inchar();
			if (cmd == ' ' || cmd == '\n') {
				/* turn everything on or off based on + or - */
				ppc64_debug_switch = on ? PPCDBG_ALL : PPCDBG_NONE;
				printf("Setting all values to %s...\n",
				       on ? "on" : "off");
				if (cmd == '\n')
					return;
				cmd = skipbl();
			}
			else
				termch = cmd;
		}
		termch = cmd;	/* not +/- ... let scanhex see it */
		scanhex((void *)&val);
		if (val >= 64) {
			printf("Value %lx out of range\n", val);
			return;
		}
		if (on) {
			ppc64_debug_switch |= PPCDBG_BITVAL(val);
			printf("enable debug %lx %s\n", val,
			       trace_names[val] ? trace_names[val] : "");
		} else {
			ppc64_debug_switch &= ~PPCDBG_BITVAL(val);
			printf("disable debug %lx %s\n", val,
			       trace_names[val] ? trace_names[val] : "");
		}
		cmd = skipbl();
	}
}

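/*
 * slbmfee/slbmfev read back entry i of the SLB: slbmfee returns the
 * ESID half (effective segment id plus valid bit), slbmfev the VSID
 * half (virtual segment id plus protection/flag bits).
 */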
static void dump_slb(void)
{
	int i;
	unsigned long tmp;

	printf("SLB contents of cpu %x\n", smp_processor_id());

	for (i = 0; i < SLB_NUM_ENTRIES; i++) {
		asm volatile("slbmfee %0,%1" : "=r" (tmp) : "r" (i));
		printf("%02d %016lx ", i, tmp);

		asm volatile("slbmfev %0,%1" : "=r" (tmp) : "r" (i));
		printf("%016lx\n", tmp);
	}
}

static void dump_stab(void)
{
	int i;
	unsigned long *tmp = (unsigned long *)get_paca()->stab_addr;

	printf("Segment table contents of cpu %x\n", smp_processor_id());

	for (i = 0; i < PAGE_SIZE/16; i++) {
		unsigned long a, b;

		a = *tmp++;
		b = *tmp++;

		if (a || b) {
			printf("%03d %016lx ", i, a);
			printf("%016lx\n", b);
		}
	}
}

void xmon_init(int enable)
{
	if (enable) {
		__debugger = xmon;
		__debugger_ipi = xmon_ipi;
		__debugger_bpt = xmon_bpt;
		__debugger_sstep = xmon_sstep;
		__debugger_iabr_match = xmon_iabr_match;
		__debugger_dabr_match = xmon_dabr_match;
		__debugger_fault_handler = xmon_fault_handler;
	} else {
		__debugger = NULL;
		__debugger_ipi = NULL;
		__debugger_bpt = NULL;
		__debugger_sstep = NULL;
		__debugger_iabr_match = NULL;
		__debugger_dabr_match = NULL;
		__debugger_fault_handler = NULL;
	}
}

void dump_segments(void)
{
	if (cpu_has_feature(CPU_FTR_SLB))
		dump_slb();
	else
		dump_stab();
}