Diffstat (limited to 'arch/ppc64')
-rw-r--r-- arch/ppc64/Kconfig | 78
-rw-r--r-- arch/ppc64/Kconfig.debug | 9
-rw-r--r-- arch/ppc64/Makefile | 40
-rw-r--r-- arch/ppc64/boot/Makefile | 57
-rw-r--r-- arch/ppc64/boot/addnote.c | 4
-rw-r--r-- arch/ppc64/boot/crt0.S | 2
-rw-r--r-- arch/ppc64/boot/div64.S | 2
-rw-r--r-- arch/ppc64/boot/elf.h | 149
-rw-r--r-- arch/ppc64/boot/main.c | 86
-rw-r--r-- arch/ppc64/boot/page.h | 34
-rw-r--r-- arch/ppc64/boot/ppc32-types.h | 36
-rw-r--r-- arch/ppc64/boot/ppc_asm.h | 62
-rw-r--r-- arch/ppc64/boot/prom.c | 196
-rw-r--r-- arch/ppc64/boot/prom.h | 18
-rw-r--r-- arch/ppc64/boot/stdio.h | 16
-rw-r--r-- arch/ppc64/boot/string.S | 2
-rw-r--r-- arch/ppc64/boot/string.h | 16
-rw-r--r-- arch/ppc64/boot/zlib.c | 2
-rw-r--r-- arch/ppc64/configs/g5_defconfig | 6
-rw-r--r-- arch/ppc64/configs/iSeries_defconfig | 7
-rw-r--r-- arch/ppc64/configs/maple_defconfig | 6
-rw-r--r-- arch/ppc64/configs/pSeries_defconfig | 6
-rw-r--r-- arch/ppc64/defconfig | 6
-rw-r--r-- arch/ppc64/kernel/LparData.c | 37
-rw-r--r-- arch/ppc64/kernel/Makefile | 15
-rw-r--r-- arch/ppc64/kernel/asm-offsets.c | 3
-rw-r--r-- arch/ppc64/kernel/bpa_iic.c | 28
-rw-r--r-- arch/ppc64/kernel/btext.c | 1
-rw-r--r-- arch/ppc64/kernel/cpu_setup_power4.S | 3
-rw-r--r-- arch/ppc64/kernel/cputable.c | 130
-rw-r--r-- arch/ppc64/kernel/eeh.c | 86
-rw-r--r-- arch/ppc64/kernel/entry.S | 13
-rw-r--r-- arch/ppc64/kernel/firmware.c | 47
-rw-r--r-- arch/ppc64/kernel/head.S | 534
-rw-r--r-- arch/ppc64/kernel/iSeries_VpdInfo.c | 5
-rw-r--r-- arch/ppc64/kernel/iSeries_htab.c | 5
-rw-r--r-- arch/ppc64/kernel/iSeries_pci.c | 2
-rw-r--r-- arch/ppc64/kernel/iSeries_setup.c | 30
-rw-r--r-- arch/ppc64/kernel/iSeries_vio.c | 155
-rw-r--r-- arch/ppc64/kernel/idle_power4.S | 2
-rw-r--r-- arch/ppc64/kernel/iomap.c | 32
-rw-r--r-- arch/ppc64/kernel/iommu.c | 3
-rw-r--r-- arch/ppc64/kernel/kprobes.c | 40
-rw-r--r-- arch/ppc64/kernel/lmb.c | 151
-rw-r--r-- arch/ppc64/kernel/lparcfg.c | 9
-rw-r--r-- arch/ppc64/kernel/maple_pci.c | 60
-rw-r--r-- arch/ppc64/kernel/maple_setup.c | 3
-rw-r--r-- arch/ppc64/kernel/misc.S | 112
-rw-r--r-- arch/ppc64/kernel/of_device.c | 2
-rw-r--r-- arch/ppc64/kernel/pSeries_iommu.c | 75
-rw-r--r-- arch/ppc64/kernel/pSeries_lpar.c | 26
-rw-r--r-- arch/ppc64/kernel/pSeries_reconfig.c | 2
-rw-r--r-- arch/ppc64/kernel/pSeries_setup.c | 56
-rw-r--r-- arch/ppc64/kernel/pSeries_smp.c | 16
-rw-r--r-- arch/ppc64/kernel/pSeries_vio.c | 273
-rw-r--r-- arch/ppc64/kernel/pacaData.c | 5
-rw-r--r-- arch/ppc64/kernel/pci.c | 480
-rw-r--r-- arch/ppc64/kernel/pci.h | 1
-rw-r--r-- arch/ppc64/kernel/pci_dn.c | 47
-rw-r--r-- arch/ppc64/kernel/pci_iommu.c | 2
-rw-r--r-- arch/ppc64/kernel/pmac_feature.c | 8
-rw-r--r-- arch/ppc64/kernel/pmac_pci.c | 66
-rw-r--r-- arch/ppc64/kernel/pmac_setup.c | 34
-rw-r--r-- arch/ppc64/kernel/pmc.c | 23
-rw-r--r-- arch/ppc64/kernel/process.c | 46
-rw-r--r-- arch/ppc64/kernel/prom.c | 186
-rw-r--r-- arch/ppc64/kernel/prom_init.c | 94
-rw-r--r-- arch/ppc64/kernel/ptrace.c | 28
-rw-r--r-- arch/ppc64/kernel/ptrace32.c | 34
-rw-r--r-- arch/ppc64/kernel/ras.c | 2
-rw-r--r-- arch/ppc64/kernel/rtas_pci.c | 54
-rw-r--r-- arch/ppc64/kernel/rtasd.c | 10
-rw-r--r-- arch/ppc64/kernel/rtc.c | 7
-rw-r--r-- arch/ppc64/kernel/scanlog.c | 17
-rw-r--r-- arch/ppc64/kernel/setup.c | 90
-rw-r--r-- arch/ppc64/kernel/signal.c | 14
-rw-r--r-- arch/ppc64/kernel/signal32.c | 13
-rw-r--r-- arch/ppc64/kernel/sys_ppc32.c | 86
-rw-r--r-- arch/ppc64/kernel/syscalls.c | 4
-rw-r--r-- arch/ppc64/kernel/sysfs.c | 105
-rw-r--r-- arch/ppc64/kernel/time.c | 15
-rw-r--r-- arch/ppc64/kernel/traps.c | 5
-rw-r--r-- arch/ppc64/kernel/u3_iommu.c | 4
-rw-r--r-- arch/ppc64/kernel/udbg.c | 306
-rw-r--r-- arch/ppc64/kernel/udbg_16550.c | 123
-rw-r--r-- arch/ppc64/kernel/udbg_scc.c | 136
-rw-r--r-- arch/ppc64/kernel/vdso32/cacheflush.S | 2
-rw-r--r-- arch/ppc64/kernel/vdso32/datapage.S | 2
-rw-r--r-- arch/ppc64/kernel/vdso32/gettimeofday.S | 2
-rw-r--r-- arch/ppc64/kernel/vdso64/cacheflush.S | 2
-rw-r--r-- arch/ppc64/kernel/vdso64/datapage.S | 2
-rw-r--r-- arch/ppc64/kernel/vdso64/gettimeofday.S | 2
-rw-r--r-- arch/ppc64/kernel/vio.c | 444
-rw-r--r-- arch/ppc64/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/ppc64/kernel/xics.c | 44
-rw-r--r-- arch/ppc64/lib/dec_and_lock.c | 8
-rw-r--r-- arch/ppc64/lib/locks.c | 14
-rw-r--r-- arch/ppc64/mm/fault.c | 36
-rw-r--r-- arch/ppc64/mm/hash_low.S | 6
-rw-r--r-- arch/ppc64/mm/hash_native.c | 3
-rw-r--r-- arch/ppc64/mm/hash_utils.c | 4
-rw-r--r-- arch/ppc64/mm/hugetlbpage.c | 392
-rw-r--r-- arch/ppc64/mm/imalloc.c | 2
-rw-r--r-- arch/ppc64/mm/init.c | 112
-rw-r--r-- arch/ppc64/mm/numa.c | 45
-rw-r--r-- arch/ppc64/mm/slb.c | 4
-rw-r--r-- arch/ppc64/mm/slb_low.S | 37
-rw-r--r-- arch/ppc64/mm/tlb.c | 95
-rw-r--r-- arch/ppc64/oprofile/common.c | 57
-rw-r--r-- arch/ppc64/oprofile/op_impl.h | 108
-rw-r--r-- arch/ppc64/oprofile/op_model_power4.c | 12
-rw-r--r-- arch/ppc64/oprofile/op_model_rs64.c | 3
-rw-r--r-- arch/ppc64/xmon/privinst.h | 1
-rw-r--r-- arch/ppc64/xmon/start.c | 6
-rw-r--r-- arch/ppc64/xmon/xmon.c | 46
115 files changed, 3540 insertions, 2793 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 2ce87836c671..deca68ad644a 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -44,6 +44,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
44 bool 44 bool
45 default y 45 default y
46 46
47config ARCH_MAY_HAVE_PC_FDC
48 bool
49 default y
50
47# We optimistically allocate largepages from the VM, so make the limit 51# We optimistically allocate largepages from the VM, so make the limit
48# large enough (16MB). This badly named config option is actually 52# large enough (16MB). This badly named config option is actually
49# max order + 1 53# max order + 1
@@ -302,12 +306,6 @@ config GENERIC_HARDIRQS
302 bool 306 bool
303 default y 307 default y
304 308
305config MSCHUNKS
306 bool
307 depends on PPC_ISERIES
308 default y
309
310
311config PPC_RTAS 309config PPC_RTAS
312 bool 310 bool
313 depends on PPC_PSERIES || PPC_BPA 311 depends on PPC_PSERIES || PPC_BPA
@@ -350,13 +348,46 @@ config SECCOMP
350 348
351 If unsure, say Y. Only embedded should say N here. 349 If unsure, say Y. Only embedded should say N here.
352 350
351source "fs/Kconfig.binfmt"
352
353config HOTPLUG_CPU
354 bool "Support for hot-pluggable CPUs"
355 depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
356 select HOTPLUG
357 ---help---
358 Say Y here to be able to turn CPUs off and on.
359
360 Say N if you are unsure.
361
362config PROC_DEVICETREE
363 bool "Support for Open Firmware device tree in /proc"
364 depends on !PPC_ISERIES
365 help
366 This option adds a device-tree directory under /proc which contains
367 an image of the device tree that the kernel copies from Open
368 Firmware. If unsure, say Y here.
369
370config CMDLINE_BOOL
371 bool "Default bootloader kernel arguments"
372 depends on !PPC_ISERIES
373
374config CMDLINE
375 string "Initial kernel command string"
376 depends on CMDLINE_BOOL
377 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
378 help
379 On some platforms, there is currently no way for the boot loader to
380 pass arguments to the kernel. For these platforms, you can supply
381 some command-line options at build time by entering them here. In
382 most cases you will need to specify the root device here.
383
353endmenu 384endmenu
354 385
355config ISA_DMA_API 386config ISA_DMA_API
356 bool 387 bool
357 default y 388 default y
358 389
359menu "General setup" 390menu "Bus Options"
360 391
361config ISA 392config ISA
362 bool 393 bool
@@ -389,45 +420,12 @@ config PCI_DOMAINS
389 bool 420 bool
390 default PCI 421 default PCI
391 422
392source "fs/Kconfig.binfmt"
393
394source "drivers/pci/Kconfig" 423source "drivers/pci/Kconfig"
395 424
396config HOTPLUG_CPU
397 bool "Support for hot-pluggable CPUs"
398 depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
399 select HOTPLUG
400 ---help---
401 Say Y here to be able to turn CPUs off and on.
402
403 Say N if you are unsure.
404
405source "drivers/pcmcia/Kconfig" 425source "drivers/pcmcia/Kconfig"
406 426
407source "drivers/pci/hotplug/Kconfig" 427source "drivers/pci/hotplug/Kconfig"
408 428
409config PROC_DEVICETREE
410 bool "Support for Open Firmware device tree in /proc"
411 depends on !PPC_ISERIES
412 help
413 This option adds a device-tree directory under /proc which contains
414 an image of the device tree that the kernel copies from Open
415 Firmware. If unsure, say Y here.
416
417config CMDLINE_BOOL
418 bool "Default bootloader kernel arguments"
419 depends on !PPC_ISERIES
420
421config CMDLINE
422 string "Initial kernel command string"
423 depends on CMDLINE_BOOL
424 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
425 help
426 On some platforms, there is currently no way for the boot loader to
427 pass arguments to the kernel. For these platforms, you can supply
428 some command-line options at build time by entering them here. In
429 most cases you will need to specify the root device here.
430
431endmenu 429endmenu
432 430
433source "net/Kconfig" 431source "net/Kconfig"
diff --git a/arch/ppc64/Kconfig.debug b/arch/ppc64/Kconfig.debug
index 46b1ce58da3b..f16a5030527b 100644
--- a/arch/ppc64/Kconfig.debug
+++ b/arch/ppc64/Kconfig.debug
@@ -41,10 +41,19 @@ config XMON
41 help 41 help
42 Include in-kernel hooks for the xmon kernel monitor/debugger. 42 Include in-kernel hooks for the xmon kernel monitor/debugger.
43 Unless you are intending to debug the kernel, say N here. 43 Unless you are intending to debug the kernel, say N here.
44 Make sure to enable also CONFIG_BOOTX_TEXT on Macs. Otherwise
45 nothing will appear on the screen (xmon writes directly to the
46 framebuffer memory).
47 The cmdline option 'xmon' or 'xmon=early' will drop into xmon very
48 early during boot. 'xmon=on' will just enable the xmon debugger hooks.
49 'xmon=off' will disable the debugger hooks if CONFIG_XMON_DEFAULT is set.
44 50
45config XMON_DEFAULT 51config XMON_DEFAULT
46 bool "Enable xmon by default" 52 bool "Enable xmon by default"
47 depends on XMON 53 depends on XMON
54 help
55 xmon is normally disabled unless booted with 'xmon=on'.
56 Use 'xmon=off' to disable xmon init during runtime.
48 57
49config PPCDBG 58config PPCDBG
50 bool "Include PPCDBG realtime debugging" 59 bool "Include PPCDBG realtime debugging"
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 731b84758331..17d2c1eac3b8 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -49,12 +49,14 @@ NM := $(NM) --synthetic
49 49
50endif 50endif
51 51
52CHECKFLAGS += -m64 -D__powerpc__ 52CHECKFLAGS += -m64 -D__powerpc__ -D__powerpc64__
53 53
54LDFLAGS := -m elf64ppc 54LDFLAGS := -m elf64ppc
55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) 55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
56CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \ 56CFLAGS += -msoft-float -pipe -mminimal-toc -mtraceback=none \
57 -mcall-aixdesc 57 -mcall-aixdesc
58# Temporary hack until we have migrated to asm-powerpc
59CPPFLAGS += -Iarch/$(ARCH)/include
58 60
59GCC_VERSION := $(call cc-version) 61GCC_VERSION := $(call cc-version)
60GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;) 62GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi ;)
@@ -87,11 +89,12 @@ drivers-$(CONFIG_OPROFILE) += arch/ppc64/oprofile/
87 89
88boot := arch/ppc64/boot 90boot := arch/ppc64/boot
89 91
90boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd 92boottargets-$(CONFIG_PPC_PSERIES) += zImage zImage.initrd
91boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd 93boottargets-$(CONFIG_PPC_PMAC) += zImage.vmode zImage.initrd.vmode
92boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm 94boottargets-$(CONFIG_PPC_MAPLE) += zImage zImage.initrd
93boottarget-$(CONFIG_PPC_BPA) := zImage zImage.initrd 95boottargets-$(CONFIG_PPC_ISERIES) += vmlinux.sminitrd vmlinux.initrd vmlinux.sm
94$(boottarget-y): vmlinux 96boottargets-$(CONFIG_PPC_BPA) += zImage zImage.initrd
97$(boottargets-y): vmlinux
95 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 98 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
96 99
97bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage 100bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
@@ -112,20 +115,21 @@ all: $(KBUILD_IMAGE)
112 115
113archclean: 116archclean:
114 $(Q)$(MAKE) $(clean)=$(boot) 117 $(Q)$(MAKE) $(clean)=$(boot)
118 # Temporary hack until we have migrated to asm-powerpc
119 $(Q)rm -rf arch/$(ARCH)/include
115 120
116prepare: include/asm-ppc64/offsets.h
117 121
118arch/ppc64/kernel/asm-offsets.s: include/asm include/linux/version.h \ 122# Temporary hack until we have migrated to asm-powerpc
119 include/config/MARKER 123include/asm: arch/$(ARCH)/include/asm
120 124arch/$(ARCH)/include/asm:
121include/asm-ppc64/offsets.h: arch/ppc64/kernel/asm-offsets.s 125 $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
122 $(call filechk,gen-asm-offsets) 126 $(Q)ln -fsn $(srctree)/include/asm-powerpc arch/$(ARCH)/include/asm
123 127
124define archhelp 128define archhelp
125 echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' 129 echo ' zImage.vmode - Compressed kernel image (arch/$(ARCH)/boot/zImage.vmode)'
126 echo ' zImage.initrd- Compressed kernel image with initrd attached,' 130 echo ' zImage.initrd.vmode - Compressed kernel image with initrd attached,'
127 echo ' sourced from arch/$(ARCH)/boot/ramdisk.image.gz' 131 echo ' sourced from arch/$(ARCH)/boot/ramdisk.image.gz'
128 echo ' (arch/$(ARCH)/boot/zImage.initrd)' 132 echo ' (arch/$(ARCH)/boot/zImage.initrd.vmode)'
133 echo ' zImage - zImage for pSeries machines'
134 echo ' zImage.initrd - zImage with initrd for pSeries machines'
129endef 135endef
130
131CLEAN_FILES += include/asm-ppc64/offsets.h
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index 683b2d43c15f..33fdc8710891 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -22,8 +22,8 @@
22 22
23 23
24HOSTCC := gcc 24HOSTCC := gcc
25BOOTCFLAGS := $(HOSTCFLAGS) $(LINUXINCLUDE) -fno-builtin 25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include)
26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional 26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds 27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds
28OBJCOPYFLAGS := contents,alloc,load,readonly,data 28OBJCOPYFLAGS := contents,alloc,load,readonly,data
29 29
@@ -37,6 +37,9 @@ quiet_cmd_bootcc = BOOTCC $@
37quiet_cmd_bootas = BOOTAS $@ 37quiet_cmd_bootas = BOOTAS $@
38 cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< 38 cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
39 39
40quiet_cmd_bootld = BOOTLD $@
41 cmd_bootld = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(2)
42
40$(patsubst %.c,%.o, $(filter %.c, $(src-boot))): %.o: %.c 43$(patsubst %.c,%.o, $(filter %.c, $(src-boot))): %.o: %.c
41 $(call if_changed_dep,bootcc) 44 $(call if_changed_dep,bootcc)
42$(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S 45$(patsubst %.S,%.o, $(filter %.S, $(src-boot))): %.o: %.S
@@ -53,7 +56,7 @@ src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
53gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section))) 56gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
54 57
55hostprogs-y := addnote addRamDisk 58hostprogs-y := addnote addRamDisk
56targets += zImage zImage.initrd imagesize.c \ 59targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd imagesize.c \
57 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \ 60 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
58 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \ 61 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
59 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \ 62 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
@@ -63,7 +66,7 @@ extra-y := initrd.o
63quiet_cmd_ramdisk = RAMDISK $@ 66quiet_cmd_ramdisk = RAMDISK $@
64 cmd_ramdisk = $(obj)/addRamDisk $(obj)/ramdisk.image.gz $< $@ 67 cmd_ramdisk = $(obj)/addRamDisk $(obj)/ramdisk.image.gz $< $@
65 68
66quiet_cmd_stripvm = STRIP $@ 69quiet_cmd_stripvm = STRIP $@
67 cmd_stripvm = $(STRIP) -s $< -o $@ 70 cmd_stripvm = $(STRIP) -s $< -o $@
68 71
69vmlinux.strip: vmlinux FORCE 72vmlinux.strip: vmlinux FORCE
@@ -71,12 +74,20 @@ vmlinux.strip: vmlinux FORCE
71$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz FORCE 74$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz FORCE
72 $(call if_changed,ramdisk) 75 $(call if_changed,ramdisk)
73 76
74addsection = $(CROSS32OBJCOPY) $(1) \ 77quiet_cmd_addsection = ADDSEC $@
75 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $(1)))=$(patsubst %.o,%.gz, $(1)) \ 78 cmd_addsection = $(CROSS32OBJCOPY) $@ \
76 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $(1)))=$(OBJCOPYFLAGS) 79 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \
80 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS)
81
82quiet_cmd_imagesize = GENSIZE $@
83 cmd_imagesize = ls -l vmlinux.strip | \
84 awk '{printf "/* generated -- do not edit! */\n" "unsigned long vmlinux_filesize = %d;\n", $$5}' \
85 > $(obj)/imagesize.c && \
86 $(CROSS_COMPILE)nm -n vmlinux | tail -n 1 | \
87 awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' >> $(obj)/imagesize.c
77 88
78quiet_cmd_addnote = ADDNOTE $@ 89quiet_cmd_addnote = ADDNOTE $@
79 cmd_addnote = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(obj-boot) && $(obj)/addnote $@ 90 cmd_addnote = $(obj)/addnote $@
80 91
81$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE 92$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE
82 $(call if_changed,gzip) 93 $(call if_changed,gzip)
@@ -85,28 +96,30 @@ $(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz
85 cp -f $(obj)/ramdisk.image.gz $@ 96 cp -f $(obj)/ramdisk.image.gz $@
86 97
87$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE 98$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE
88 touch $@ 99 @touch $@
89 100
90$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE 101$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE
91 $(call if_changed_dep,bootcc) 102 $(call if_changed_dep,bootcc)
92 $(call addsection, $@) 103 $(call cmd,addsection)
104
105$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
106$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) FORCE
107 $(call cmd,bootld,$(obj-boot))
108
109$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
110$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) FORCE
111 $(call cmd,bootld,$(obj-boot))
93 112
94$(obj)/zImage: obj-boot += $(call obj-sec, $(required)) 113$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote FORCE
95$(obj)/zImage: $(call obj-sec, $(required)) $(obj-boot) $(obj)/addnote FORCE 114 @cp -f $< $@
96 $(call if_changed,addnote) 115 $(call if_changed,addnote)
97 116
98$(obj)/zImage.initrd: obj-boot += $(call obj-sec, $(required) $(initrd)) 117$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote FORCE
99$(obj)/zImage.initrd: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(obj)/addnote FORCE 118 @cp -f $< $@
100 $(call if_changed,addnote) 119 $(call if_changed,addnote)
101 120
102$(obj)/imagesize.c: vmlinux.strip 121$(obj)/imagesize.c: vmlinux.strip
103 @echo Generating $@ 122 $(call cmd,imagesize)
104 ls -l vmlinux.strip | \
105 awk '{printf "/* generated -- do not edit! */\n" \
106 "unsigned long vmlinux_filesize = %d;\n", $$5}' > $(obj)/imagesize.c
107 $(CROSS_COMPILE)nm -n vmlinux | tail -n 1 | \
108 awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' \
109 >> $(obj)/imagesize.c
110 123
111install: $(CONFIGURE) $(BOOTIMAGE) 124install: $(CONFIGURE) $(BOOTIMAGE)
112 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)" 125 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
diff --git a/arch/ppc64/boot/addnote.c b/arch/ppc64/boot/addnote.c
index 719663a694bb..8041a9845ab7 100644
--- a/arch/ppc64/boot/addnote.c
+++ b/arch/ppc64/boot/addnote.c
@@ -157,7 +157,7 @@ main(int ac, char **av)
157 PUT_32BE(ns, strlen(arch) + 1); 157 PUT_32BE(ns, strlen(arch) + 1);
158 PUT_32BE(ns + 4, N_DESCR * 4); 158 PUT_32BE(ns + 4, N_DESCR * 4);
159 PUT_32BE(ns + 8, 0x1275); 159 PUT_32BE(ns + 8, 0x1275);
160 strcpy(&buf[ns + 12], arch); 160 strcpy((char *) &buf[ns + 12], arch);
161 ns += 12 + strlen(arch) + 1; 161 ns += 12 + strlen(arch) + 1;
162 for (i = 0; i < N_DESCR; ++i, ns += 4) 162 for (i = 0; i < N_DESCR; ++i, ns += 4)
163 PUT_32BE(ns, descr[i]); 163 PUT_32BE(ns, descr[i]);
@@ -172,7 +172,7 @@ main(int ac, char **av)
172 PUT_32BE(ns, strlen(rpaname) + 1); 172 PUT_32BE(ns, strlen(rpaname) + 1);
173 PUT_32BE(ns + 4, sizeof(rpanote)); 173 PUT_32BE(ns + 4, sizeof(rpanote));
174 PUT_32BE(ns + 8, 0x12759999); 174 PUT_32BE(ns + 8, 0x12759999);
175 strcpy(&buf[ns + 12], rpaname); 175 strcpy((char *) &buf[ns + 12], rpaname);
176 ns += 12 + ROUNDUP(strlen(rpaname) + 1); 176 ns += 12 + ROUNDUP(strlen(rpaname) + 1);
177 for (i = 0; i < N_RPA_DESCR; ++i, ns += 4) 177 for (i = 0; i < N_RPA_DESCR; ++i, ns += 4)
178 PUT_32BE(ns, rpanote[i]); 178 PUT_32BE(ns, rpanote[i]);
diff --git a/arch/ppc64/boot/crt0.S b/arch/ppc64/boot/crt0.S
index 04d3e74cd72f..3861e7f9cf19 100644
--- a/arch/ppc64/boot/crt0.S
+++ b/arch/ppc64/boot/crt0.S
@@ -9,7 +9,7 @@
9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32. 9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
10 */ 10 */
11 11
12#include <asm/ppc_asm.h> 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl _start 15 .globl _start
diff --git a/arch/ppc64/boot/div64.S b/arch/ppc64/boot/div64.S
index 38f7e466d7d6..722f360a32a9 100644
--- a/arch/ppc64/boot/div64.S
+++ b/arch/ppc64/boot/div64.S
@@ -13,7 +13,7 @@
13 * as published by the Free Software Foundation; either version 13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 14 * 2 of the License, or (at your option) any later version.
15 */ 15 */
16#include <asm/ppc_asm.h> 16#include "ppc_asm.h"
17 17
18 .globl __div64_32 18 .globl __div64_32
19__div64_32: 19__div64_32:
diff --git a/arch/ppc64/boot/elf.h b/arch/ppc64/boot/elf.h
new file mode 100644
index 000000000000..d4828fcf1cb9
--- /dev/null
+++ b/arch/ppc64/boot/elf.h
@@ -0,0 +1,149 @@
1#ifndef _PPC_BOOT_ELF_H_
2#define _PPC_BOOT_ELF_H_
3
4/* 32-bit ELF base types. */
5typedef unsigned int Elf32_Addr;
6typedef unsigned short Elf32_Half;
7typedef unsigned int Elf32_Off;
8typedef signed int Elf32_Sword;
9typedef unsigned int Elf32_Word;
10
11/* 64-bit ELF base types. */
12typedef unsigned long long Elf64_Addr;
13typedef unsigned short Elf64_Half;
14typedef signed short Elf64_SHalf;
15typedef unsigned long long Elf64_Off;
16typedef signed int Elf64_Sword;
17typedef unsigned int Elf64_Word;
18typedef unsigned long long Elf64_Xword;
19typedef signed long long Elf64_Sxword;
20
21/* These constants are for the segment types stored in the image headers */
22#define PT_NULL 0
23#define PT_LOAD 1
24#define PT_DYNAMIC 2
25#define PT_INTERP 3
26#define PT_NOTE 4
27#define PT_SHLIB 5
28#define PT_PHDR 6
29#define PT_TLS 7 /* Thread local storage segment */
30#define PT_LOOS 0x60000000 /* OS-specific */
31#define PT_HIOS 0x6fffffff /* OS-specific */
32#define PT_LOPROC 0x70000000
33#define PT_HIPROC 0x7fffffff
34#define PT_GNU_EH_FRAME 0x6474e550
35
36#define PT_GNU_STACK (PT_LOOS + 0x474e551)
37
38/* These constants define the different elf file types */
39#define ET_NONE 0
40#define ET_REL 1
41#define ET_EXEC 2
42#define ET_DYN 3
43#define ET_CORE 4
44#define ET_LOPROC 0xff00
45#define ET_HIPROC 0xffff
46
47/* These constants define the various ELF target machines */
48#define EM_NONE 0
49#define EM_PPC 20 /* PowerPC */
50#define EM_PPC64 21 /* PowerPC64 */
51
52#define EI_NIDENT 16
53
54typedef struct elf32_hdr {
55 unsigned char e_ident[EI_NIDENT];
56 Elf32_Half e_type;
57 Elf32_Half e_machine;
58 Elf32_Word e_version;
59 Elf32_Addr e_entry; /* Entry point */
60 Elf32_Off e_phoff;
61 Elf32_Off e_shoff;
62 Elf32_Word e_flags;
63 Elf32_Half e_ehsize;
64 Elf32_Half e_phentsize;
65 Elf32_Half e_phnum;
66 Elf32_Half e_shentsize;
67 Elf32_Half e_shnum;
68 Elf32_Half e_shstrndx;
69} Elf32_Ehdr;
70
71typedef struct elf64_hdr {
72 unsigned char e_ident[16]; /* ELF "magic number" */
73 Elf64_Half e_type;
74 Elf64_Half e_machine;
75 Elf64_Word e_version;
76 Elf64_Addr e_entry; /* Entry point virtual address */
77 Elf64_Off e_phoff; /* Program header table file offset */
78 Elf64_Off e_shoff; /* Section header table file offset */
79 Elf64_Word e_flags;
80 Elf64_Half e_ehsize;
81 Elf64_Half e_phentsize;
82 Elf64_Half e_phnum;
83 Elf64_Half e_shentsize;
84 Elf64_Half e_shnum;
85 Elf64_Half e_shstrndx;
86} Elf64_Ehdr;
87
88/* These constants define the permissions on sections in the program
89 header, p_flags. */
90#define PF_R 0x4
91#define PF_W 0x2
92#define PF_X 0x1
93
94typedef struct elf32_phdr {
95 Elf32_Word p_type;
96 Elf32_Off p_offset;
97 Elf32_Addr p_vaddr;
98 Elf32_Addr p_paddr;
99 Elf32_Word p_filesz;
100 Elf32_Word p_memsz;
101 Elf32_Word p_flags;
102 Elf32_Word p_align;
103} Elf32_Phdr;
104
105typedef struct elf64_phdr {
106 Elf64_Word p_type;
107 Elf64_Word p_flags;
108 Elf64_Off p_offset; /* Segment file offset */
109 Elf64_Addr p_vaddr; /* Segment virtual address */
110 Elf64_Addr p_paddr; /* Segment physical address */
111 Elf64_Xword p_filesz; /* Segment size in file */
112 Elf64_Xword p_memsz; /* Segment size in memory */
113 Elf64_Xword p_align; /* Segment alignment, file & memory */
114} Elf64_Phdr;
115
116#define EI_MAG0 0 /* e_ident[] indexes */
117#define EI_MAG1 1
118#define EI_MAG2 2
119#define EI_MAG3 3
120#define EI_CLASS 4
121#define EI_DATA 5
122#define EI_VERSION 6
123#define EI_OSABI 7
124#define EI_PAD 8
125
126#define ELFMAG0 0x7f /* EI_MAG */
127#define ELFMAG1 'E'
128#define ELFMAG2 'L'
129#define ELFMAG3 'F'
130#define ELFMAG "\177ELF"
131#define SELFMAG 4
132
133#define ELFCLASSNONE 0 /* EI_CLASS */
134#define ELFCLASS32 1
135#define ELFCLASS64 2
136#define ELFCLASSNUM 3
137
138#define ELFDATANONE 0 /* e_ident[EI_DATA] */
139#define ELFDATA2LSB 1
140#define ELFDATA2MSB 2
141
142#define EV_NONE 0 /* e_version, EI_VERSION */
143#define EV_CURRENT 1
144#define EV_NUM 2
145
146#define ELFOSABI_NONE 0
147#define ELFOSABI_LINUX 3
148
149#endif /* _PPC_BOOT_ELF_H_ */
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index 199d9804f61c..f7ec19a2d0b0 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -8,38 +8,32 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#include "ppc32-types.h" 11#include <stdarg.h>
12#include <stddef.h>
13#include "elf.h"
14#include "page.h"
15#include "string.h"
16#include "stdio.h"
17#include "prom.h"
12#include "zlib.h" 18#include "zlib.h"
13#include <linux/elf.h> 19
14#include <linux/string.h> 20static void gunzip(void *, int, unsigned char *, int *);
15#include <asm/processor.h> 21extern void flush_cache(void *, unsigned long);
16#include <asm/page.h> 22
17
18extern void *finddevice(const char *);
19extern int getprop(void *, const char *, void *, int);
20extern void printf(const char *fmt, ...);
21extern int sprintf(char *buf, const char *fmt, ...);
22void gunzip(void *, int, unsigned char *, int *);
23void *claim(unsigned int, unsigned int, unsigned int);
24void flush_cache(void *, unsigned long);
25void pause(void);
26extern void exit(void);
27
28unsigned long strlen(const char *s);
29void *memmove(void *dest, const void *src, unsigned long n);
30void *memcpy(void *dest, const void *src, unsigned long n);
31 23
32/* Value picked to match that used by yaboot */ 24/* Value picked to match that used by yaboot */
33#define PROG_START 0x01400000 25#define PROG_START 0x01400000
34#define RAM_END (256<<20) // Fixme: use OF */ 26#define RAM_END (512<<20) // Fixme: use OF */
27#define ONE_MB 0x100000
35 28
36char *avail_ram; 29static char *avail_ram;
37char *begin_avail, *end_avail; 30static char *begin_avail, *end_avail;
38char *avail_high; 31static char *avail_high;
39unsigned int heap_use; 32static unsigned int heap_use;
40unsigned int heap_max; 33static unsigned int heap_max;
41 34
42extern char _start[]; 35extern char _start[];
36extern char _end[];
43extern char _vmlinux_start[]; 37extern char _vmlinux_start[];
44extern char _vmlinux_end[]; 38extern char _vmlinux_end[];
45extern char _initrd_start[]; 39extern char _initrd_start[];
@@ -52,9 +46,9 @@ struct addr_range {
52 unsigned long size; 46 unsigned long size;
53 unsigned long memsize; 47 unsigned long memsize;
54}; 48};
55struct addr_range vmlinux = {0, 0, 0}; 49static struct addr_range vmlinux = {0, 0, 0};
56struct addr_range vmlinuz = {0, 0, 0}; 50static struct addr_range vmlinuz = {0, 0, 0};
57struct addr_range initrd = {0, 0, 0}; 51static struct addr_range initrd = {0, 0, 0};
58 52
59static char scratch[128<<10]; /* 128kB of scratch space for gunzip */ 53static char scratch[128<<10]; /* 128kB of scratch space for gunzip */
60 54
@@ -64,22 +58,15 @@ typedef void (*kernel_entry_t)( unsigned long,
64 void *); 58 void *);
65 59
66 60
67int (*prom)(void *);
68
69void *chosen_handle;
70void *stdin;
71void *stdout;
72void *stderr;
73
74#undef DEBUG 61#undef DEBUG
75 62
76static unsigned long claim_base = PROG_START; 63static unsigned long claim_base;
77 64
78static unsigned long try_claim(unsigned long size) 65static unsigned long try_claim(unsigned long size)
79{ 66{
80 unsigned long addr = 0; 67 unsigned long addr = 0;
81 68
82 for(; claim_base < RAM_END; claim_base += 0x100000) { 69 for(; claim_base < RAM_END; claim_base += ONE_MB) {
83#ifdef DEBUG 70#ifdef DEBUG
84 printf(" trying: 0x%08lx\n\r", claim_base); 71 printf(" trying: 0x%08lx\n\r", claim_base);
85#endif 72#endif
@@ -110,7 +97,26 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
110 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4) 97 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
111 exit(); 98 exit();
112 99
113 printf("\n\rzImage starting: loaded at 0x%x\n\r", (unsigned)_start); 100 printf("\n\rzImage starting: loaded at 0x%lx\n\r", (unsigned long) _start);
101
102 /*
103 * The first available claim_base must be above the end of the
104 * the loaded kernel wrapper file (_start to _end includes the
105 * initrd image if it is present) and rounded up to a nice
106 * 1 MB boundary for good measure.
107 */
108
109 claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
110
111#if defined(PROG_START)
112 /*
113 * Maintain a "magic" minimum address. This keeps some older
114 * firmware platforms running.
115 */
116
117 if (claim_base < PROG_START)
118 claim_base = PROG_START;
119#endif
114 120
115 /* 121 /*
116 * Now we try to claim some memory for the kernel itself 122 * Now we try to claim some memory for the kernel itself
@@ -120,7 +126,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
120 * size... In practice we add 1Mb, that is enough, but we should really 126 * size... In practice we add 1Mb, that is enough, but we should really
121 * consider fixing the Makefile to put a _raw_ kernel in there ! 127 * consider fixing the Makefile to put a _raw_ kernel in there !
122 */ 128 */
123 vmlinux_memsize += 0x100000; 129 vmlinux_memsize += ONE_MB;
124 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize); 130 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize);
125 vmlinux.addr = try_claim(vmlinux_memsize); 131 vmlinux.addr = try_claim(vmlinux_memsize);
126 if (vmlinux.addr == 0) { 132 if (vmlinux.addr == 0) {
@@ -277,7 +283,7 @@ void zfree(void *x, void *addr, unsigned nb)
277 283
278#define DEFLATED 8 284#define DEFLATED 8
279 285
280void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp) 286static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
281{ 287{
282 z_stream s; 288 z_stream s;
283 int r, i, flags; 289 int r, i, flags;
diff --git a/arch/ppc64/boot/page.h b/arch/ppc64/boot/page.h
new file mode 100644
index 000000000000..14eca30fef64
--- /dev/null
+++ b/arch/ppc64/boot/page.h
@@ -0,0 +1,34 @@
1#ifndef _PPC_BOOT_PAGE_H
2#define _PPC_BOOT_PAGE_H
3/*
4 * Copyright (C) 2001 PPC64 Team, IBM Corp
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifdef __ASSEMBLY__
13#define ASM_CONST(x) x
14#else
15#define __ASM_CONST(x) x##UL
16#define ASM_CONST(x) __ASM_CONST(x)
17#endif
18
19/* PAGE_SHIFT determines the page size */
20#define PAGE_SHIFT 12
21#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
22#define PAGE_MASK (~(PAGE_SIZE-1))
23
24/* align addr on a size boundary - adjust address up/down if needed */
25#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
26#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
27
28/* align addr on a size boundary - adjust address up if needed */
29#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
30
31/* to align the pointer to the (next) page boundary */
32#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
33
34#endif /* _PPC_BOOT_PAGE_H */
diff --git a/arch/ppc64/boot/ppc32-types.h b/arch/ppc64/boot/ppc32-types.h
deleted file mode 100644
index f7b8884f8f70..000000000000
--- a/arch/ppc64/boot/ppc32-types.h
+++ /dev/null
@@ -1,36 +0,0 @@
1#ifndef _PPC64_TYPES_H
2#define _PPC64_TYPES_H
3
4typedef __signed__ char __s8;
5typedef unsigned char __u8;
6
7typedef __signed__ short __s16;
8typedef unsigned short __u16;
9
10typedef __signed__ int __s32;
11typedef unsigned int __u32;
12
13typedef __signed__ long long __s64;
14typedef unsigned long long __u64;
15
16typedef signed char s8;
17typedef unsigned char u8;
18
19typedef signed short s16;
20typedef unsigned short u16;
21
22typedef signed int s32;
23typedef unsigned int u32;
24
25typedef signed long long s64;
26typedef unsigned long long u64;
27
28typedef struct {
29 __u32 u[4];
30} __attribute((aligned(16))) __vector128;
31
32#define BITS_PER_LONG 32
33
34typedef __vector128 vector128;
35
36#endif /* _PPC64_TYPES_H */
diff --git a/arch/ppc64/boot/ppc_asm.h b/arch/ppc64/boot/ppc_asm.h
new file mode 100644
index 000000000000..1c2c2817f9b7
--- /dev/null
+++ b/arch/ppc64/boot/ppc_asm.h
@@ -0,0 +1,62 @@
1#ifndef _PPC64_PPC_ASM_H
2#define _PPC64_PPC_ASM_H
3/*
4 *
5 * Definitions used by various bits of low-level assembly code on PowerPC.
6 *
7 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15/* Condition Register Bit Fields */
16
17#define cr0 0
18#define cr1 1
19#define cr2 2
20#define cr3 3
21#define cr4 4
22#define cr5 5
23#define cr6 6
24#define cr7 7
25
26
27/* General Purpose Registers (GPRs) */
28
29#define r0 0
30#define r1 1
31#define r2 2
32#define r3 3
33#define r4 4
34#define r5 5
35#define r6 6
36#define r7 7
37#define r8 8
38#define r9 9
39#define r10 10
40#define r11 11
41#define r12 12
42#define r13 13
43#define r14 14
44#define r15 15
45#define r16 16
46#define r17 17
47#define r18 18
48#define r19 19
49#define r20 20
50#define r21 21
51#define r22 22
52#define r23 23
53#define r24 24
54#define r25 25
55#define r26 26
56#define r27 27
57#define r28 28
58#define r29 29
59#define r30 30
60#define r31 31
61
62#endif /* _PPC64_PPC_ASM_H */
diff --git a/arch/ppc64/boot/prom.c b/arch/ppc64/boot/prom.c
index 5e48b80ff5a0..4bea2f4dcb06 100644
--- a/arch/ppc64/boot/prom.c
+++ b/arch/ppc64/boot/prom.c
@@ -7,43 +7,19 @@
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 */ 8 */
9#include <stdarg.h> 9#include <stdarg.h>
10#include <linux/types.h> 10#include <stddef.h>
11#include <linux/string.h> 11#include "string.h"
12#include <linux/ctype.h> 12#include "stdio.h"
13 13#include "prom.h"
14extern __u32 __div64_32(unsigned long long *dividend, __u32 divisor);
15
16/* The unnecessary pointer compare is there
17 * to check for type safety (n must be 64bit)
18 */
19# define do_div(n,base) ({ \
20 __u32 __base = (base); \
21 __u32 __rem; \
22 (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
23 if (((n) >> 32) == 0) { \
24 __rem = (__u32)(n) % __base; \
25 (n) = (__u32)(n) / __base; \
26 } else \
27 __rem = __div64_32(&(n), __base); \
28 __rem; \
29 })
30 14
31int (*prom)(void *); 15int (*prom)(void *);
32 16
33void *chosen_handle; 17void *chosen_handle;
18
34void *stdin; 19void *stdin;
35void *stdout; 20void *stdout;
36void *stderr; 21void *stderr;
37 22
38void exit(void);
39void *finddevice(const char *name);
40int getprop(void *phandle, const char *name, void *buf, int buflen);
41void chrpboot(int a1, int a2, void *prom); /* in main.c */
42
43int printf(char *fmt, ...);
44
45/* there is no convenient header to get this from... -- paulus */
46extern unsigned long strlen(const char *);
47 23
48int 24int
49write(void *handle, void *ptr, int nb) 25write(void *handle, void *ptr, int nb)
@@ -210,107 +186,6 @@ fputs(char *str, void *f)
210 return write(f, str, n) == n? 0: -1; 186 return write(f, str, n) == n? 0: -1;
211} 187}
212 188
213int
214readchar(void)
215{
216 char ch;
217
218 for (;;) {
219 switch (read(stdin, &ch, 1)) {
220 case 1:
221 return ch;
222 case -1:
223 printf("read(stdin) returned -1\r\n");
224 return -1;
225 }
226 }
227}
228
229static char line[256];
230static char *lineptr;
231static int lineleft;
232
233int
234getchar(void)
235{
236 int c;
237
238 if (lineleft == 0) {
239 lineptr = line;
240 for (;;) {
241 c = readchar();
242 if (c == -1 || c == 4)
243 break;
244 if (c == '\r' || c == '\n') {
245 *lineptr++ = '\n';
246 putchar('\n');
247 break;
248 }
249 switch (c) {
250 case 0177:
251 case '\b':
252 if (lineptr > line) {
253 putchar('\b');
254 putchar(' ');
255 putchar('\b');
256 --lineptr;
257 }
258 break;
259 case 'U' & 0x1F:
260 while (lineptr > line) {
261 putchar('\b');
262 putchar(' ');
263 putchar('\b');
264 --lineptr;
265 }
266 break;
267 default:
268 if (lineptr >= &line[sizeof(line) - 1])
269 putchar('\a');
270 else {
271 putchar(c);
272 *lineptr++ = c;
273 }
274 }
275 }
276 lineleft = lineptr - line;
277 lineptr = line;
278 }
279 if (lineleft == 0)
280 return -1;
281 --lineleft;
282 return *lineptr++;
283}
284
285
286
287/* String functions lifted from lib/vsprintf.c and lib/ctype.c */
288unsigned char _ctype[] = {
289_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
290_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
291_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
292_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
293_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
294_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
295_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
296_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
297_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
298_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
299_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
300_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
301_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
302_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
303_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
304_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
3050,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
3060,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
307_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
308_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
309_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
310_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
311_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
312_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
313
314size_t strnlen(const char * s, size_t count) 189size_t strnlen(const char * s, size_t count)
315{ 190{
316 const char *sc; 191 const char *sc;
@@ -320,44 +195,30 @@ size_t strnlen(const char * s, size_t count)
320 return sc - s; 195 return sc - s;
321} 196}
322 197
323unsigned long simple_strtoul(const char *cp,char **endp,unsigned int base) 198extern unsigned int __div64_32(unsigned long long *dividend,
324{ 199 unsigned int divisor);
325 unsigned long result = 0,value;
326 200
327 if (!base) { 201/* The unnecessary pointer compare is there
328 base = 10; 202 * to check for type safety (n must be 64bit)
329 if (*cp == '0') { 203 */
330 base = 8; 204# define do_div(n,base) ({ \
331 cp++; 205 unsigned int __base = (base); \
332 if ((*cp == 'x') && isxdigit(cp[1])) { 206 unsigned int __rem; \
333 cp++; 207 (void)(((typeof((n)) *)0) == ((unsigned long long *)0)); \
334 base = 16; 208 if (((n) >> 32) == 0) { \
335 } 209 __rem = (unsigned int)(n) % __base; \
336 } 210 (n) = (unsigned int)(n) / __base; \
337 } 211 } else \
338 while (isxdigit(*cp) && 212 __rem = __div64_32(&(n), __base); \
339 (value = isdigit(*cp) ? *cp-'0' : toupper(*cp)-'A'+10) < base) { 213 __rem; \
340 result = result*base + value; 214 })
341 cp++;
342 }
343 if (endp)
344 *endp = (char *)cp;
345 return result;
346}
347
348long simple_strtol(const char *cp,char **endp,unsigned int base)
349{
350 if(*cp=='-')
351 return -simple_strtoul(cp+1,endp,base);
352 return simple_strtoul(cp,endp,base);
353}
354 215
355static int skip_atoi(const char **s) 216static int skip_atoi(const char **s)
356{ 217{
357 int i=0; 218 int i, c;
358 219
359 while (isdigit(**s)) 220 for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
360 i = i*10 + *((*s)++) - '0'; 221 i = i*10 + c - '0';
361 return i; 222 return i;
362} 223}
363 224
@@ -436,9 +297,6 @@ static char * number(char * str, unsigned long long num, int base, int size, int
436 return str; 297 return str;
437} 298}
438 299
439/* Forward decl. needed for IP address printing stuff... */
440int sprintf(char * buf, const char *fmt, ...);
441
442int vsprintf(char *buf, const char *fmt, va_list args) 300int vsprintf(char *buf, const char *fmt, va_list args)
443{ 301{
444 int len; 302 int len;
@@ -477,7 +335,7 @@ int vsprintf(char *buf, const char *fmt, va_list args)
477 335
478 /* get field width */ 336 /* get field width */
479 field_width = -1; 337 field_width = -1;
480 if (isdigit(*fmt)) 338 if ('0' <= *fmt && *fmt <= '9')
481 field_width = skip_atoi(&fmt); 339 field_width = skip_atoi(&fmt);
482 else if (*fmt == '*') { 340 else if (*fmt == '*') {
483 ++fmt; 341 ++fmt;
@@ -493,7 +351,7 @@ int vsprintf(char *buf, const char *fmt, va_list args)
493 precision = -1; 351 precision = -1;
494 if (*fmt == '.') { 352 if (*fmt == '.') {
495 ++fmt; 353 ++fmt;
496 if (isdigit(*fmt)) 354 if ('0' <= *fmt && *fmt <= '9')
497 precision = skip_atoi(&fmt); 355 precision = skip_atoi(&fmt);
498 else if (*fmt == '*') { 356 else if (*fmt == '*') {
499 ++fmt; 357 ++fmt;
@@ -628,7 +486,7 @@ int sprintf(char * buf, const char *fmt, ...)
628static char sprint_buf[1024]; 486static char sprint_buf[1024];
629 487
630int 488int
631printf(char *fmt, ...) 489printf(const char *fmt, ...)
632{ 490{
633 va_list args; 491 va_list args;
634 int n; 492 int n;
diff --git a/arch/ppc64/boot/prom.h b/arch/ppc64/boot/prom.h
new file mode 100644
index 000000000000..96ab5aec740c
--- /dev/null
+++ b/arch/ppc64/boot/prom.h
@@ -0,0 +1,18 @@
1#ifndef _PPC_BOOT_PROM_H_
2#define _PPC_BOOT_PROM_H_
3
4extern int (*prom) (void *);
5extern void *chosen_handle;
6
7extern void *stdin;
8extern void *stdout;
9extern void *stderr;
10
11extern int write(void *handle, void *ptr, int nb);
12extern int read(void *handle, void *ptr, int nb);
13extern void exit(void);
14extern void pause(void);
15extern void *finddevice(const char *);
16extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
17extern int getprop(void *phandle, const char *name, void *buf, int buflen);
18#endif /* _PPC_BOOT_PROM_H_ */
diff --git a/arch/ppc64/boot/stdio.h b/arch/ppc64/boot/stdio.h
new file mode 100644
index 000000000000..24bd3a8dee94
--- /dev/null
+++ b/arch/ppc64/boot/stdio.h
@@ -0,0 +1,16 @@
1#ifndef _PPC_BOOT_STDIO_H_
2#define _PPC_BOOT_STDIO_H_
3
4extern int printf(const char *fmt, ...);
5
6extern int sprintf(char *buf, const char *fmt, ...);
7
8extern int vsprintf(char *buf, const char *fmt, va_list args);
9
10extern int putc(int c, void *f);
11extern int putchar(int c);
12extern int getchar(void);
13
14extern int fputs(char *str, void *f);
15
16#endif /* _PPC_BOOT_STDIO_H_ */
diff --git a/arch/ppc64/boot/string.S b/arch/ppc64/boot/string.S
index ba5f2d21c9ea..7ade87ae7718 100644
--- a/arch/ppc64/boot/string.S
+++ b/arch/ppc64/boot/string.S
@@ -9,7 +9,7 @@
9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32. 9 * NOTE: this code runs in 32 bit mode and is packaged as ELF32.
10 */ 10 */
11 11
12#include <asm/ppc_asm.h> 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl strcpy 15 .globl strcpy
diff --git a/arch/ppc64/boot/string.h b/arch/ppc64/boot/string.h
new file mode 100644
index 000000000000..9289258bcbd6
--- /dev/null
+++ b/arch/ppc64/boot/string.h
@@ -0,0 +1,16 @@
1#ifndef _PPC_BOOT_STRING_H_
2#define _PPC_BOOT_STRING_H_
3
4extern char *strcpy(char *dest, const char *src);
5extern char *strncpy(char *dest, const char *src, size_t n);
6extern char *strcat(char *dest, const char *src);
7extern int strcmp(const char *s1, const char *s2);
8extern size_t strlen(const char *s);
9extern size_t strnlen(const char *s, size_t count);
10
11extern void *memset(void *s, int c, size_t n);
12extern void *memmove(void *dest, const void *src, unsigned long n);
13extern void *memcpy(void *dest, const void *src, unsigned long n);
14extern int memcmp(const void *s1, const void *s2, size_t n);
15
16#endif /* _PPC_BOOT_STRING_H_ */
diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
index 78837e884b8b..0d910cd2079d 100644
--- a/arch/ppc64/boot/zlib.c
+++ b/arch/ppc64/boot/zlib.c
@@ -107,7 +107,7 @@ extern void *memcpy(void *, const void *, unsigned long);
107 107
108/* Diagnostic functions */ 108/* Diagnostic functions */
109#ifdef DEBUG_ZLIB 109#ifdef DEBUG_ZLIB
110# include <stdio.h> 110# include "stdio.h"
111# ifndef verbose 111# ifndef verbose
112# define verbose 0 112# define verbose 0
113# endif 113# endif
diff --git a/arch/ppc64/configs/g5_defconfig b/arch/ppc64/configs/g5_defconfig
index ab567741e80e..fc83d9330282 100644
--- a/arch/ppc64/configs/g5_defconfig
+++ b/arch/ppc64/configs/g5_defconfig
@@ -103,10 +103,10 @@ CONFIG_PREEMPT_NONE=y
103# CONFIG_PREEMPT_VOLUNTARY is not set 103# CONFIG_PREEMPT_VOLUNTARY is not set
104# CONFIG_PREEMPT is not set 104# CONFIG_PREEMPT is not set
105# CONFIG_PREEMPT_BKL is not set 105# CONFIG_PREEMPT_BKL is not set
106CONFIG_HZ_100=y 106# CONFIG_HZ_100 is not set
107# CONFIG_HZ_250 is not set 107CONFIG_HZ_250=y
108# CONFIG_HZ_1000 is not set 108# CONFIG_HZ_1000 is not set
109CONFIG_HZ=100 109CONFIG_HZ=250
110CONFIG_GENERIC_HARDIRQS=y 110CONFIG_GENERIC_HARDIRQS=y
111CONFIG_SECCOMP=y 111CONFIG_SECCOMP=y
112CONFIG_ISA_DMA_API=y 112CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/iSeries_defconfig b/arch/ppc64/configs/iSeries_defconfig
index 394ba18b58c7..013d4e0e4003 100644
--- a/arch/ppc64/configs/iSeries_defconfig
+++ b/arch/ppc64/configs/iSeries_defconfig
@@ -94,12 +94,11 @@ CONFIG_PREEMPT_NONE=y
94# CONFIG_PREEMPT_VOLUNTARY is not set 94# CONFIG_PREEMPT_VOLUNTARY is not set
95# CONFIG_PREEMPT is not set 95# CONFIG_PREEMPT is not set
96# CONFIG_PREEMPT_BKL is not set 96# CONFIG_PREEMPT_BKL is not set
97CONFIG_HZ_100=y 97# CONFIG_HZ_100 is not set
98# CONFIG_HZ_250 is not set 98CONFIG_HZ_250=y
99# CONFIG_HZ_1000 is not set 99# CONFIG_HZ_1000 is not set
100CONFIG_HZ=100 100CONFIG_HZ=250
101CONFIG_GENERIC_HARDIRQS=y 101CONFIG_GENERIC_HARDIRQS=y
102CONFIG_MSCHUNKS=y
103CONFIG_LPARCFG=y 102CONFIG_LPARCFG=y
104CONFIG_SECCOMP=y 103CONFIG_SECCOMP=y
105CONFIG_ISA_DMA_API=y 104CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/maple_defconfig b/arch/ppc64/configs/maple_defconfig
index 2033fe663dbe..dd42892cd873 100644
--- a/arch/ppc64/configs/maple_defconfig
+++ b/arch/ppc64/configs/maple_defconfig
@@ -103,10 +103,10 @@ CONFIG_PREEMPT_NONE=y
103# CONFIG_PREEMPT_VOLUNTARY is not set 103# CONFIG_PREEMPT_VOLUNTARY is not set
104# CONFIG_PREEMPT is not set 104# CONFIG_PREEMPT is not set
105# CONFIG_PREEMPT_BKL is not set 105# CONFIG_PREEMPT_BKL is not set
106CONFIG_HZ_100=y 106# CONFIG_HZ_100 is not set
107# CONFIG_HZ_250 is not set 107CONFIG_HZ_250=y
108# CONFIG_HZ_1000 is not set 108# CONFIG_HZ_1000 is not set
109CONFIG_HZ=100 109CONFIG_HZ=250
110CONFIG_GENERIC_HARDIRQS=y 110CONFIG_GENERIC_HARDIRQS=y
111CONFIG_SECCOMP=y 111CONFIG_SECCOMP=y
112CONFIG_ISA_DMA_API=y 112CONFIG_ISA_DMA_API=y
diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig
index 297fd5229487..29f7b80b0efc 100644
--- a/arch/ppc64/configs/pSeries_defconfig
+++ b/arch/ppc64/configs/pSeries_defconfig
@@ -112,10 +112,10 @@ CONFIG_PREEMPT_NONE=y
112# CONFIG_PREEMPT_VOLUNTARY is not set 112# CONFIG_PREEMPT_VOLUNTARY is not set
113# CONFIG_PREEMPT is not set 113# CONFIG_PREEMPT is not set
114# CONFIG_PREEMPT_BKL is not set 114# CONFIG_PREEMPT_BKL is not set
115CONFIG_HZ_100=y 115# CONFIG_HZ_100 is not set
116# CONFIG_HZ_250 is not set 116CONFIG_HZ_250=y
117# CONFIG_HZ_1000 is not set 117# CONFIG_HZ_1000 is not set
118CONFIG_HZ=100 118CONFIG_HZ=250
119CONFIG_EEH=y 119CONFIG_EEH=y
120CONFIG_GENERIC_HARDIRQS=y 120CONFIG_GENERIC_HARDIRQS=y
121CONFIG_PPC_RTAS=y 121CONFIG_PPC_RTAS=y
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index c361e7727b7a..7cb4750bb7a9 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -114,10 +114,10 @@ CONFIG_PREEMPT_NONE=y
114# CONFIG_PREEMPT_VOLUNTARY is not set 114# CONFIG_PREEMPT_VOLUNTARY is not set
115# CONFIG_PREEMPT is not set 115# CONFIG_PREEMPT is not set
116# CONFIG_PREEMPT_BKL is not set 116# CONFIG_PREEMPT_BKL is not set
117CONFIG_HZ_100=y 117# CONFIG_HZ_100 is not set
118# CONFIG_HZ_250 is not set 118CONFIG_HZ_250=y
119# CONFIG_HZ_1000 is not set 119# CONFIG_HZ_1000 is not set
120CONFIG_HZ=100 120CONFIG_HZ=250
121CONFIG_EEH=y 121CONFIG_EEH=y
122CONFIG_GENERIC_HARDIRQS=y 122CONFIG_GENERIC_HARDIRQS=y
123CONFIG_PPC_RTAS=y 123CONFIG_PPC_RTAS=y
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index 1c11031c838e..0a9c23ca2f0c 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -51,6 +51,17 @@ struct HvReleaseData hvReleaseData = {
51 0xf4, 0x4b, 0xf6, 0xf4 }, 51 0xf4, 0x4b, 0xf6, 0xf4 },
52}; 52};
53 53
54/*
55 * The NACA. The first dword of the naca is required by the iSeries
56 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
57 * through the pointer in hvReleaseData.
58 */
59struct naca_struct naca = {
60 .xItVpdAreas = &itVpdAreas,
61 .xRamDisk = 0,
62 .xRamDiskSize = 0,
63};
64
54extern void system_reset_iSeries(void); 65extern void system_reset_iSeries(void);
55extern void machine_check_iSeries(void); 66extern void machine_check_iSeries(void);
56extern void data_access_iSeries(void); 67extern void data_access_iSeries(void);
@@ -214,29 +225,3 @@ struct ItVpdAreas itVpdAreas = {
214 0,0 225 0,0
215 } 226 }
216}; 227};
217
218struct msChunks msChunks;
219EXPORT_SYMBOL(msChunks);
220
221/* Depending on whether this is called from iSeries or pSeries setup
222 * code, the location of the msChunks struct may or may not have
223 * to be reloc'd, so we force the caller to do that for us by passing
224 * in a pointer to the structure.
225 */
226unsigned long
227msChunks_alloc(unsigned long mem, unsigned long num_chunks, unsigned long chunk_size)
228{
229 unsigned long offset = reloc_offset();
230 struct msChunks *_msChunks = PTRRELOC(&msChunks);
231
232 _msChunks->num_chunks = num_chunks;
233 _msChunks->chunk_size = chunk_size;
234 _msChunks->chunk_shift = __ilog2(chunk_size);
235 _msChunks->chunk_mask = (1UL<<_msChunks->chunk_shift)-1;
236
237 mem = _ALIGN(mem, sizeof(msChunks_entry));
238 _msChunks->abs = (msChunks_entry *)(mem + offset);
239 mem += num_chunks * sizeof(msChunks_entry);
240
241 return mem;
242}
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 2ecccb6b4f8c..ae60eb1193c6 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \ 11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \ 12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \ 13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o 14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/ 15obj-y += vdso32/ vdso64/
16 16
17obj-$(CONFIG_PPC_OF) += of_device.o 17obj-$(CONFIG_PPC_OF) += of_device.o
@@ -31,7 +31,7 @@ obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \ 33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 pSeries_setup.o pSeries_iommu.o 34 pSeries_setup.o pSeries_iommu.o udbg_16550.o
35 35
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ 36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o 37 bpa_iic.o spider-pic.o
@@ -50,14 +50,19 @@ obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 51obj-$(CONFIG_BOOTX_TEXT) += btext.o
52obj-$(CONFIG_HVCS) += hvcserver.o 52obj-$(CONFIG_HVCS) += hvcserver.o
53obj-$(CONFIG_IBMVIO) += vio.o 53
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
54obj-$(CONFIG_XICS) += xics.o 57obj-$(CONFIG_XICS) += xics.o
55obj-$(CONFIG_MPIC) += mpic.o 58obj-$(CONFIG_MPIC) += mpic.o
56 59
57obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 60obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \
58 pmac_time.o pmac_nvram.o pmac_low_i2c.o 61 pmac_time.o pmac_nvram.o pmac_low_i2c.o \
62 udbg_scc.o
59 63
60obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o 64obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
65 udbg_16550.o
61 66
62obj-$(CONFIG_U3_DART) += u3_iommu.o 67obj-$(CONFIG_U3_DART) += u3_iommu.o
63 68
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 6f910fa2746f..1ff4fa05a973 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -95,7 +95,8 @@ int main(void)
95 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); 95 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
96 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 96 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
97#ifdef CONFIG_HUGETLB_PAGE 97#ifdef CONFIG_HUGETLB_PAGE
98 DEFINE(PACAHTLBSEGS, offsetof(struct paca_struct, context.htlb_segs)); 98 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
99 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
99#endif /* CONFIG_HUGETLB_PAGE */ 100#endif /* CONFIG_HUGETLB_PAGE */
100 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr)); 101 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
101 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); 102 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/ppc64/kernel/bpa_iic.c
index c8f3dc3fad70..0aaa878e19d3 100644
--- a/arch/ppc64/kernel/bpa_iic.c
+++ b/arch/ppc64/kernel/bpa_iic.c
@@ -205,6 +205,18 @@ static struct iic_regs __iomem *find_iic(int cpu)
205} 205}
206 206
207#ifdef CONFIG_SMP 207#ifdef CONFIG_SMP
208
209/* Use the highest interrupt priorities for IPI */
210static inline int iic_ipi_to_irq(int ipi)
211{
212 return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
213}
214
215static inline int iic_irq_to_ipi(int irq)
216{
217 return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET);
218}
219
208void iic_setup_cpu(void) 220void iic_setup_cpu(void)
209{ 221{
210 out_be64(&__get_cpu_var(iic).regs->prio, 0xff); 222 out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
@@ -212,18 +224,20 @@ void iic_setup_cpu(void)
212 224
213void iic_cause_IPI(int cpu, int mesg) 225void iic_cause_IPI(int cpu, int mesg)
214{ 226{
215 out_be64(&per_cpu(iic, cpu).regs->generate, mesg); 227 out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
216} 228}
217 229
218static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 230static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
219{ 231{
220 232 smp_message_recv(iic_irq_to_ipi(irq), regs);
221 smp_message_recv(irq - IIC_IPI_OFFSET, regs);
222 return IRQ_HANDLED; 233 return IRQ_HANDLED;
223} 234}
224 235
225static void iic_request_ipi(int irq, const char *name) 236static void iic_request_ipi(int ipi, const char *name)
226{ 237{
238 int irq;
239
240 irq = iic_ipi_to_irq(ipi);
227 /* IPIs are marked SA_INTERRUPT as they must run with irqs 241 /* IPIs are marked SA_INTERRUPT as they must run with irqs
228 * disabled */ 242 * disabled */
229 get_irq_desc(irq)->handler = &iic_pic; 243 get_irq_desc(irq)->handler = &iic_pic;
@@ -233,10 +247,10 @@ static void iic_request_ipi(int irq, const char *name)
233 247
234void iic_request_IPIs(void) 248void iic_request_IPIs(void)
235{ 249{
236 iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_CALL_FUNCTION, "IPI-call"); 250 iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
237 iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_RESCHEDULE, "IPI-resched"); 251 iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
238#ifdef CONFIG_DEBUGGER 252#ifdef CONFIG_DEBUGGER
239 iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); 253 iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
240#endif /* CONFIG_DEBUGGER */ 254#endif /* CONFIG_DEBUGGER */
241} 255}
242#endif /* CONFIG_SMP */ 256#endif /* CONFIG_SMP */
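The new helpers reserve the highest-priority interrupt slots for IPIs by reversing the order within the IPI range, and iic_cause_IPI() and iic_ipi_action() now agree on that mapping. A standalone sketch of the round trip; the offset and count are arbitrary stand-ins, since the real values come from the BPA interrupt headers:

    #include <assert.h>

    #define IIC_IPI_OFFSET  0x40    /* assumed value */
    #define IIC_NUM_IPIS    4       /* assumed value */

    static int ipi_to_irq(int ipi)
    {
            /* higher-numbered IPIs land on lower, higher-priority slots */
            return IIC_IPI_OFFSET + IIC_NUM_IPIS - 1 - ipi;
    }

    static int irq_to_ipi(int irq)
    {
            return IIC_NUM_IPIS - 1 - (irq - IIC_IPI_OFFSET);
    }

    int main(void)
    {
            for (int ipi = 0; ipi < IIC_NUM_IPIS; ipi++)
                    assert(irq_to_ipi(ipi_to_irq(ipi)) == ipi);
            return 0;
    }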
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
index c53f079e9b77..b6fbfbe9032d 100644
--- a/arch/ppc64/kernel/btext.c
+++ b/arch/ppc64/kernel/btext.c
@@ -7,7 +7,6 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/string.h> 8#include <linux/string.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/version.h>
11 10
12#include <asm/sections.h> 11#include <asm/sections.h>
13#include <asm/prom.h> 12#include <asm/prom.h>
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S
index 0482c063c26e..1fb673c511ff 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/ppc64/kernel/cpu_setup_power4.S
@@ -12,10 +12,9 @@
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/ppc_asm.h>
16#include <asm/cputable.h> 15#include <asm/cputable.h>
17#include <asm/ppc_asm.h> 16#include <asm/ppc_asm.h>
18#include <asm/offsets.h> 17#include <asm/asm-offsets.h>
19#include <asm/cache.h> 18#include <asm/cache.h>
20 19
21_GLOBAL(__970_cpu_preinit) 20_GLOBAL(__970_cpu_preinit)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
index 77cec42f9525..8831a28c3c4e 100644
--- a/arch/ppc64/kernel/cputable.c
+++ b/arch/ppc64/kernel/cputable.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Modifications for ppc64: 6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com> 7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/module.h> 20#include <linux/module.h>
21 21
22#include <asm/oprofile_impl.h>
22#include <asm/cputable.h> 23#include <asm/cputable.h>
23 24
24struct cpu_spec* cur_cpu_spec = NULL; 25struct cpu_spec* cur_cpu_spec = NULL;
@@ -54,26 +55,32 @@ struct cpu_spec cpu_specs[] = {
54 .pvr_value = 0x00400000, 55 .pvr_value = 0x00400000,
55 .cpu_name = "POWER3 (630)", 56 .cpu_name = "POWER3 (630)",
56 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 57 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
57 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 58 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
58 CPU_FTR_PMC8,
59 .cpu_user_features = COMMON_USER_PPC64, 59 .cpu_user_features = COMMON_USER_PPC64,
60 .icache_bsize = 128, 60 .icache_bsize = 128,
61 .dcache_bsize = 128, 61 .dcache_bsize = 128,
62 .num_pmcs = 8,
62 .cpu_setup = __setup_cpu_power3, 63 .cpu_setup = __setup_cpu_power3,
63 .firmware_features = COMMON_PPC64_FW, 64#ifdef CONFIG_OPROFILE
65 .oprofile_cpu_type = "ppc64/power3",
66 .oprofile_model = &op_model_rs64,
67#endif
64 }, 68 },
65 { /* Power3+ */ 69 { /* Power3+ */
66 .pvr_mask = 0xffff0000, 70 .pvr_mask = 0xffff0000,
67 .pvr_value = 0x00410000, 71 .pvr_value = 0x00410000,
68 .cpu_name = "POWER3 (630+)", 72 .cpu_name = "POWER3 (630+)",
69 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 73 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
70 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 74 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
71 CPU_FTR_PMC8,
72 .cpu_user_features = COMMON_USER_PPC64, 75 .cpu_user_features = COMMON_USER_PPC64,
73 .icache_bsize = 128, 76 .icache_bsize = 128,
74 .dcache_bsize = 128, 77 .dcache_bsize = 128,
78 .num_pmcs = 8,
75 .cpu_setup = __setup_cpu_power3, 79 .cpu_setup = __setup_cpu_power3,
76 .firmware_features = COMMON_PPC64_FW, 80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
83#endif
77 }, 84 },
78 { /* Northstar */ 85 { /* Northstar */
79 .pvr_mask = 0xffff0000, 86 .pvr_mask = 0xffff0000,
@@ -81,12 +88,16 @@ struct cpu_spec cpu_specs[] = {
81 .cpu_name = "RS64-II (northstar)", 88 .cpu_name = "RS64-II (northstar)",
82 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 89 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
83 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 90 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
84 CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, 91 CPU_FTR_MMCRA | CPU_FTR_CTRL,
85 .cpu_user_features = COMMON_USER_PPC64, 92 .cpu_user_features = COMMON_USER_PPC64,
86 .icache_bsize = 128, 93 .icache_bsize = 128,
87 .dcache_bsize = 128, 94 .dcache_bsize = 128,
95 .num_pmcs = 8,
88 .cpu_setup = __setup_cpu_power3, 96 .cpu_setup = __setup_cpu_power3,
89 .firmware_features = COMMON_PPC64_FW, 97#ifdef CONFIG_OPROFILE
98 .oprofile_cpu_type = "ppc64/rs64",
99 .oprofile_model = &op_model_rs64,
100#endif
90 }, 101 },
91 { /* Pulsar */ 102 { /* Pulsar */
92 .pvr_mask = 0xffff0000, 103 .pvr_mask = 0xffff0000,
@@ -94,12 +105,16 @@ struct cpu_spec cpu_specs[] = {
94 .cpu_name = "RS64-III (pulsar)", 105 .cpu_name = "RS64-III (pulsar)",
95 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 106 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
96 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 107 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
97 CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, 108 CPU_FTR_MMCRA | CPU_FTR_CTRL,
98 .cpu_user_features = COMMON_USER_PPC64, 109 .cpu_user_features = COMMON_USER_PPC64,
99 .icache_bsize = 128, 110 .icache_bsize = 128,
100 .dcache_bsize = 128, 111 .dcache_bsize = 128,
112 .num_pmcs = 8,
101 .cpu_setup = __setup_cpu_power3, 113 .cpu_setup = __setup_cpu_power3,
102 .firmware_features = COMMON_PPC64_FW, 114#ifdef CONFIG_OPROFILE
115 .oprofile_cpu_type = "ppc64/rs64",
116 .oprofile_model = &op_model_rs64,
117#endif
103 }, 118 },
104 { /* I-star */ 119 { /* I-star */
105 .pvr_mask = 0xffff0000, 120 .pvr_mask = 0xffff0000,
@@ -107,12 +122,16 @@ struct cpu_spec cpu_specs[] = {
107 .cpu_name = "RS64-III (icestar)", 122 .cpu_name = "RS64-III (icestar)",
108 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 123 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
109 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 124 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
110 CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, 125 CPU_FTR_MMCRA | CPU_FTR_CTRL,
111 .cpu_user_features = COMMON_USER_PPC64, 126 .cpu_user_features = COMMON_USER_PPC64,
112 .icache_bsize = 128, 127 .icache_bsize = 128,
113 .dcache_bsize = 128, 128 .dcache_bsize = 128,
129 .num_pmcs = 8,
114 .cpu_setup = __setup_cpu_power3, 130 .cpu_setup = __setup_cpu_power3,
115 .firmware_features = COMMON_PPC64_FW, 131#ifdef CONFIG_OPROFILE
132 .oprofile_cpu_type = "ppc64/rs64",
133 .oprofile_model = &op_model_rs64,
134#endif
116 }, 135 },
117 { /* S-star */ 136 { /* S-star */
118 .pvr_mask = 0xffff0000, 137 .pvr_mask = 0xffff0000,
@@ -120,12 +139,16 @@ struct cpu_spec cpu_specs[] = {
120 .cpu_name = "RS64-IV (sstar)", 139 .cpu_name = "RS64-IV (sstar)",
121 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 140 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
122 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | 141 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
123 CPU_FTR_PMC8 | CPU_FTR_MMCRA | CPU_FTR_CTRL, 142 CPU_FTR_MMCRA | CPU_FTR_CTRL,
124 .cpu_user_features = COMMON_USER_PPC64, 143 .cpu_user_features = COMMON_USER_PPC64,
125 .icache_bsize = 128, 144 .icache_bsize = 128,
126 .dcache_bsize = 128, 145 .dcache_bsize = 128,
146 .num_pmcs = 8,
127 .cpu_setup = __setup_cpu_power3, 147 .cpu_setup = __setup_cpu_power3,
128 .firmware_features = COMMON_PPC64_FW, 148#ifdef CONFIG_OPROFILE
149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_model = &op_model_rs64,
151#endif
129 }, 152 },
130 { /* Power4 */ 153 { /* Power4 */
131 .pvr_mask = 0xffff0000, 154 .pvr_mask = 0xffff0000,
@@ -133,12 +156,16 @@ struct cpu_spec cpu_specs[] = {
133 .cpu_name = "POWER4 (gp)", 156 .cpu_name = "POWER4 (gp)",
134 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 157 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
135 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 158 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
136 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA, 159 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
137 .cpu_user_features = COMMON_USER_PPC64, 160 .cpu_user_features = COMMON_USER_PPC64,
138 .icache_bsize = 128, 161 .icache_bsize = 128,
139 .dcache_bsize = 128, 162 .dcache_bsize = 128,
163 .num_pmcs = 8,
140 .cpu_setup = __setup_cpu_power4, 164 .cpu_setup = __setup_cpu_power4,
141 .firmware_features = COMMON_PPC64_FW, 165#ifdef CONFIG_OPROFILE
166 .oprofile_cpu_type = "ppc64/power4",
167 .oprofile_model = &op_model_rs64,
168#endif
142 }, 169 },
143 { /* Power4+ */ 170 { /* Power4+ */
144 .pvr_mask = 0xffff0000, 171 .pvr_mask = 0xffff0000,
@@ -146,12 +173,16 @@ struct cpu_spec cpu_specs[] = {
146 .cpu_name = "POWER4+ (gq)", 173 .cpu_name = "POWER4+ (gq)",
147 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 174 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
148 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 175 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
149 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_PMC8 | CPU_FTR_MMCRA, 176 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
150 .cpu_user_features = COMMON_USER_PPC64, 177 .cpu_user_features = COMMON_USER_PPC64,
151 .icache_bsize = 128, 178 .icache_bsize = 128,
152 .dcache_bsize = 128, 179 .dcache_bsize = 128,
180 .num_pmcs = 8,
153 .cpu_setup = __setup_cpu_power4, 181 .cpu_setup = __setup_cpu_power4,
154 .firmware_features = COMMON_PPC64_FW, 182#ifdef CONFIG_OPROFILE
183 .oprofile_cpu_type = "ppc64/power4",
184 .oprofile_model = &op_model_power4,
185#endif
155 }, 186 },
156 { /* PPC970 */ 187 { /* PPC970 */
157 .pvr_mask = 0xffff0000, 188 .pvr_mask = 0xffff0000,
@@ -160,13 +191,17 @@ struct cpu_spec cpu_specs[] = {
160 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 191 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
161 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 192 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
162 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | 193 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
163 CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, 194 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
164 .cpu_user_features = COMMON_USER_PPC64 | 195 .cpu_user_features = COMMON_USER_PPC64 |
165 PPC_FEATURE_HAS_ALTIVEC_COMP, 196 PPC_FEATURE_HAS_ALTIVEC_COMP,
166 .icache_bsize = 128, 197 .icache_bsize = 128,
167 .dcache_bsize = 128, 198 .dcache_bsize = 128,
199 .num_pmcs = 8,
168 .cpu_setup = __setup_cpu_ppc970, 200 .cpu_setup = __setup_cpu_ppc970,
169 .firmware_features = COMMON_PPC64_FW, 201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
170 }, 205 },
171 { /* PPC970FX */ 206 { /* PPC970FX */
172 .pvr_mask = 0xffff0000, 207 .pvr_mask = 0xffff0000,
@@ -175,13 +210,17 @@ struct cpu_spec cpu_specs[] = {
175 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 210 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
176 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 211 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
177 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | 212 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
178 CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, 213 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
179 .cpu_user_features = COMMON_USER_PPC64 | 214 .cpu_user_features = COMMON_USER_PPC64 |
180 PPC_FEATURE_HAS_ALTIVEC_COMP, 215 PPC_FEATURE_HAS_ALTIVEC_COMP,
181 .icache_bsize = 128, 216 .icache_bsize = 128,
182 .dcache_bsize = 128, 217 .dcache_bsize = 128,
218 .num_pmcs = 8,
183 .cpu_setup = __setup_cpu_ppc970, 219 .cpu_setup = __setup_cpu_ppc970,
184 .firmware_features = COMMON_PPC64_FW, 220#ifdef CONFIG_OPROFILE
221 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_model = &op_model_power4,
223#endif
185 }, 224 },
186 { /* PPC970MP */ 225 { /* PPC970MP */
187 .pvr_mask = 0xffff0000, 226 .pvr_mask = 0xffff0000,
@@ -190,13 +229,16 @@ struct cpu_spec cpu_specs[] = {
190 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 229 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
191 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | 230 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
192 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | 231 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
193 CPU_FTR_CAN_NAP | CPU_FTR_PMC8 | CPU_FTR_MMCRA, 232 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
194 .cpu_user_features = COMMON_USER_PPC64 | 233 .cpu_user_features = COMMON_USER_PPC64 |
195 PPC_FEATURE_HAS_ALTIVEC_COMP, 234 PPC_FEATURE_HAS_ALTIVEC_COMP,
196 .icache_bsize = 128, 235 .icache_bsize = 128,
197 .dcache_bsize = 128, 236 .dcache_bsize = 128,
198 .cpu_setup = __setup_cpu_ppc970, 237 .cpu_setup = __setup_cpu_ppc970,
199 .firmware_features = COMMON_PPC64_FW, 238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
200 }, 242 },
201 { /* Power5 */ 243 { /* Power5 */
202 .pvr_mask = 0xffff0000, 244 .pvr_mask = 0xffff0000,
@@ -210,8 +252,12 @@ struct cpu_spec cpu_specs[] = {
210 .cpu_user_features = COMMON_USER_PPC64, 252 .cpu_user_features = COMMON_USER_PPC64,
211 .icache_bsize = 128, 253 .icache_bsize = 128,
212 .dcache_bsize = 128, 254 .dcache_bsize = 128,
255 .num_pmcs = 6,
213 .cpu_setup = __setup_cpu_power4, 256 .cpu_setup = __setup_cpu_power4,
214 .firmware_features = COMMON_PPC64_FW, 257#ifdef CONFIG_OPROFILE
258 .oprofile_cpu_type = "ppc64/power5",
259 .oprofile_model = &op_model_power4,
260#endif
215 }, 261 },
216 { /* Power5 */ 262 { /* Power5 */
217 .pvr_mask = 0xffff0000, 263 .pvr_mask = 0xffff0000,
@@ -225,8 +271,12 @@ struct cpu_spec cpu_specs[] = {
225 .cpu_user_features = COMMON_USER_PPC64, 271 .cpu_user_features = COMMON_USER_PPC64,
226 .icache_bsize = 128, 272 .icache_bsize = 128,
227 .dcache_bsize = 128, 273 .dcache_bsize = 128,
274 .num_pmcs = 6,
228 .cpu_setup = __setup_cpu_power4, 275 .cpu_setup = __setup_cpu_power4,
229 .firmware_features = COMMON_PPC64_FW, 276#ifdef CONFIG_OPROFILE
277 .oprofile_cpu_type = "ppc64/power5",
278 .oprofile_model = &op_model_power4,
279#endif
230 }, 280 },
231 { /* BE DD1.x */ 281 { /* BE DD1.x */
232 .pvr_mask = 0xffff0000, 282 .pvr_mask = 0xffff0000,
@@ -241,7 +291,6 @@ struct cpu_spec cpu_specs[] = {
241 .icache_bsize = 128, 291 .icache_bsize = 128,
242 .dcache_bsize = 128, 292 .dcache_bsize = 128,
243 .cpu_setup = __setup_cpu_be, 293 .cpu_setup = __setup_cpu_be,
244 .firmware_features = COMMON_PPC64_FW,
245 }, 294 },
246 { /* default match */ 295 { /* default match */
247 .pvr_mask = 0x00000000, 296 .pvr_mask = 0x00000000,
@@ -253,30 +302,7 @@ struct cpu_spec cpu_specs[] = {
253 .cpu_user_features = COMMON_USER_PPC64, 302 .cpu_user_features = COMMON_USER_PPC64,
254 .icache_bsize = 128, 303 .icache_bsize = 128,
255 .dcache_bsize = 128, 304 .dcache_bsize = 128,
305 .num_pmcs = 6,
256 .cpu_setup = __setup_cpu_power4, 306 .cpu_setup = __setup_cpu_power4,
257 .firmware_features = COMMON_PPC64_FW,
258 } 307 }
259}; 308};
260
261firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
262 {FW_FEATURE_PFT, "hcall-pft"},
263 {FW_FEATURE_TCE, "hcall-tce"},
264 {FW_FEATURE_SPRG0, "hcall-sprg0"},
265 {FW_FEATURE_DABR, "hcall-dabr"},
266 {FW_FEATURE_COPY, "hcall-copy"},
267 {FW_FEATURE_ASR, "hcall-asr"},
268 {FW_FEATURE_DEBUG, "hcall-debug"},
269 {FW_FEATURE_PERF, "hcall-perf"},
270 {FW_FEATURE_DUMP, "hcall-dump"},
271 {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
272 {FW_FEATURE_MIGRATE, "hcall-migrate"},
273 {FW_FEATURE_PERFMON, "hcall-perfmon"},
274 {FW_FEATURE_CRQ, "hcall-crq"},
275 {FW_FEATURE_VIO, "hcall-vio"},
276 {FW_FEATURE_RDMA, "hcall-rdma"},
277 {FW_FEATURE_LLAN, "hcall-lLAN"},
278 {FW_FEATURE_BULK, "hcall-bulk"},
279 {FW_FEATURE_XDABR, "hcall-xdabr"},
280 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
281 {FW_FEATURE_SPLPAR, "hcall-splpar"},
282};
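Every cpu_specs[] entry above is selected by masking the processor version register with pvr_mask and comparing the result against pvr_value; the final all-zero mask is the fallback that matches anything. A simplified sketch of that lookup (the field set and PVR values are illustrative only):

    struct cpu_spec_lite {
            unsigned int pvr_mask;
            unsigned int pvr_value;
            const char *cpu_name;
            int num_pmcs;
    };

    static struct cpu_spec_lite specs[] = {
            { 0xffff0000, 0x00390000, "PPC970", 8 },
            { 0xffff0000, 0x003a0000, "POWER5", 6 },
            { 0x00000000, 0x00000000, "POWER4 (compatible)", 6 }, /* default */
    };

    static struct cpu_spec_lite *identify_cpu(unsigned int pvr)
    {
            for (unsigned int i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
                    if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                            return &specs[i];
            return 0;       /* unreachable: the zero mask matches everything */
    }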
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index af5272fedadf..ba93fd731222 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -202,10 +202,9 @@ static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
202 while (n) { 202 while (n) {
203 struct pci_io_addr_range *piar; 203 struct pci_io_addr_range *piar;
204 piar = rb_entry(n, struct pci_io_addr_range, rb_node); 204 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
205 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s %s\n", 205 printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
206 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, 206 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
207 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev), 207 piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
208 pci_pretty_name(piar->pcidev));
209 cnt++; 208 cnt++;
210 n = rb_next(n); 209 n = rb_next(n);
211 } 210 }
@@ -255,22 +254,24 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
255static void __pci_addr_cache_insert_device(struct pci_dev *dev) 254static void __pci_addr_cache_insert_device(struct pci_dev *dev)
256{ 255{
257 struct device_node *dn; 256 struct device_node *dn;
257 struct pci_dn *pdn;
258 int i; 258 int i;
259 int inserted = 0; 259 int inserted = 0;
260 260
261 dn = pci_device_to_OF_node(dev); 261 dn = pci_device_to_OF_node(dev);
262 if (!dn) { 262 if (!dn) {
263 printk(KERN_WARNING "PCI: no pci dn found for dev=%s %s\n", 263 printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n",
264 pci_name(dev), pci_pretty_name(dev)); 264 pci_name(dev));
265 return; 265 return;
266 } 266 }
267 267
268 /* Skip any devices for which EEH is not enabled. */ 268 /* Skip any devices for which EEH is not enabled. */
269 if (!(dn->eeh_mode & EEH_MODE_SUPPORTED) || 269 pdn = dn->data;
270 dn->eeh_mode & EEH_MODE_NOCHECK) { 270 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
271 pdn->eeh_mode & EEH_MODE_NOCHECK) {
271#ifdef DEBUG 272#ifdef DEBUG
272 printk(KERN_INFO "PCI: skip building address cache for=%s %s\n", 273 printk(KERN_INFO "PCI: skip building address cache for=%s\n",
273 pci_name(dev), pci_pretty_name(dev)); 274 pci_name(dev));
274#endif 275#endif
275 return; 276 return;
276 } 277 }
@@ -416,6 +417,7 @@ int eeh_unregister_notifier(struct notifier_block *nb)
416static int read_slot_reset_state(struct device_node *dn, int rets[]) 417static int read_slot_reset_state(struct device_node *dn, int rets[])
417{ 418{
418 int token, outputs; 419 int token, outputs;
420 struct pci_dn *pdn = dn->data;
419 421
420 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) { 422 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
421 token = ibm_read_slot_reset_state2; 423 token = ibm_read_slot_reset_state2;
@@ -425,8 +427,8 @@ static int read_slot_reset_state(struct device_node *dn, int rets[])
425 outputs = 3; 427 outputs = 3;
426 } 428 }
427 429
428 return rtas_call(token, 3, outputs, rets, dn->eeh_config_addr, 430 return rtas_call(token, 3, outputs, rets, pdn->eeh_config_addr,
429 BUID_HI(dn->phb->buid), BUID_LO(dn->phb->buid)); 431 BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
430} 432}
431 433
432/** 434/**
@@ -447,12 +449,12 @@ static void eeh_panic(struct pci_dev *dev, int reset_state)
447 * in light of potential corruption, we can use it here. 449 * in light of potential corruption, we can use it here.
448 */ 450 */
449 if (panic_on_oops) 451 if (panic_on_oops)
450 panic("EEH: MMIO failure (%d) on device:%s %s\n", reset_state, 452 panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
451 pci_name(dev), pci_pretty_name(dev)); 453 pci_name(dev));
452 else { 454 else {
453 __get_cpu_var(ignored_failures)++; 455 __get_cpu_var(ignored_failures)++;
454 printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s %s\n", 456 printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
455 reset_state, pci_name(dev), pci_pretty_name(dev)); 457 reset_state, pci_name(dev));
456 } 458 }
457} 459}
458 460
@@ -482,8 +484,8 @@ static void eeh_event_handler(void *dummy)
482 break; 484 break;
483 485
484 printk(KERN_INFO "EEH: MMIO failure (%d), notifiying device " 486 printk(KERN_INFO "EEH: MMIO failure (%d), notifiying device "
485 "%s %s\n", event->reset_state, 487 "%s\n", event->reset_state,
486 pci_name(event->dev), pci_pretty_name(event->dev)); 488 pci_name(event->dev));
487 489
488 atomic_set(&eeh_fail_count, 0); 490 atomic_set(&eeh_fail_count, 0);
489 notifier_call_chain (&eeh_notifier_chain, 491 notifier_call_chain (&eeh_notifier_chain,
@@ -535,6 +537,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
535 unsigned long flags; 537 unsigned long flags;
536 int rc, reset_state; 538 int rc, reset_state;
537 struct eeh_event *event; 539 struct eeh_event *event;
540 struct pci_dn *pdn;
538 541
539 __get_cpu_var(total_mmio_ffs)++; 542 __get_cpu_var(total_mmio_ffs)++;
540 543
@@ -543,14 +546,15 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
543 546
544 if (!dn) 547 if (!dn)
545 return 0; 548 return 0;
549 pdn = dn->data;
546 550
547 /* Access to IO BARs might get this far and still not want checking. */ 551 /* Access to IO BARs might get this far and still not want checking. */
548 if (!(dn->eeh_mode & EEH_MODE_SUPPORTED) || 552 if (!pdn->eeh_capable || !(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
549 dn->eeh_mode & EEH_MODE_NOCHECK) { 553 pdn->eeh_mode & EEH_MODE_NOCHECK) {
550 return 0; 554 return 0;
551 } 555 }
552 556
553 if (!dn->eeh_config_addr) { 557 if (!pdn->eeh_config_addr) {
554 return 0; 558 return 0;
555 } 559 }
556 560
@@ -558,7 +562,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
558 * If we already have a pending isolation event for this 562 * If we already have a pending isolation event for this
559 * slot, we know it's bad already, we don't need to check... 563 * slot, we know it's bad already, we don't need to check...
560 */ 564 */
561 if (dn->eeh_mode & EEH_MODE_ISOLATED) { 565 if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
562 atomic_inc(&eeh_fail_count); 566 atomic_inc(&eeh_fail_count);
563 if (atomic_read(&eeh_fail_count) >= EEH_MAX_FAILS) { 567 if (atomic_read(&eeh_fail_count) >= EEH_MAX_FAILS) {
564 /* re-read the slot reset state */ 568 /* re-read the slot reset state */
@@ -583,7 +587,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
583 } 587 }
584 588
585 /* prevent repeated reports of this failure */ 589 /* prevent repeated reports of this failure */
586 dn->eeh_mode |= EEH_MODE_ISOLATED; 590 pdn->eeh_mode |= EEH_MODE_ISOLATED;
587 591
588 reset_state = rets[0]; 592 reset_state = rets[0];
589 593
@@ -591,9 +595,9 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
591 memset(slot_errbuf, 0, eeh_error_buf_size); 595 memset(slot_errbuf, 0, eeh_error_buf_size);
592 596
593 rc = rtas_call(ibm_slot_error_detail, 597 rc = rtas_call(ibm_slot_error_detail,
594 8, 1, NULL, dn->eeh_config_addr, 598 8, 1, NULL, pdn->eeh_config_addr,
595 BUID_HI(dn->phb->buid), 599 BUID_HI(pdn->phb->buid),
596 BUID_LO(dn->phb->buid), NULL, 0, 600 BUID_LO(pdn->phb->buid), NULL, 0,
597 virt_to_phys(slot_errbuf), 601 virt_to_phys(slot_errbuf),
598 eeh_error_buf_size, 602 eeh_error_buf_size,
599 1 /* Temporary Error */); 603 1 /* Temporary Error */);
@@ -680,8 +684,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
680 u32 *device_id = (u32 *)get_property(dn, "device-id", NULL); 684 u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
681 u32 *regs; 685 u32 *regs;
682 int enable; 686 int enable;
687 struct pci_dn *pdn = dn->data;
683 688
684 dn->eeh_mode = 0; 689 pdn->eeh_mode = 0;
685 690
686 if (status && strcmp(status, "ok") != 0) 691 if (status && strcmp(status, "ok") != 0)
687 return NULL; /* ignore devices with bad status */ 692 return NULL; /* ignore devices with bad status */
@@ -692,7 +697,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
692 697
693 /* There is nothing to check on PCI to ISA bridges */ 698 /* There is nothing to check on PCI to ISA bridges */
694 if (dn->type && !strcmp(dn->type, "isa")) { 699 if (dn->type && !strcmp(dn->type, "isa")) {
695 dn->eeh_mode |= EEH_MODE_NOCHECK; 700 pdn->eeh_mode |= EEH_MODE_NOCHECK;
696 return NULL; 701 return NULL;
697 } 702 }
698 703
@@ -709,7 +714,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
709 enable = 0; 714 enable = 0;
710 715
711 if (!enable) 716 if (!enable)
712 dn->eeh_mode |= EEH_MODE_NOCHECK; 717 pdn->eeh_mode |= EEH_MODE_NOCHECK;
713 718
714 /* Ok... see if this device supports EEH. Some do, some don't, 719 /* Ok... see if this device supports EEH. Some do, some don't,
715 * and the only way to find out is to check each and every one. */ 720 * and the only way to find out is to check each and every one. */
@@ -722,8 +727,8 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
722 EEH_ENABLE); 727 EEH_ENABLE);
723 if (ret == 0) { 728 if (ret == 0) {
724 eeh_subsystem_enabled = 1; 729 eeh_subsystem_enabled = 1;
725 dn->eeh_mode |= EEH_MODE_SUPPORTED; 730 pdn->eeh_mode |= EEH_MODE_SUPPORTED;
726 dn->eeh_config_addr = regs[0]; 731 pdn->eeh_config_addr = regs[0];
727#ifdef DEBUG 732#ifdef DEBUG
728 printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name); 733 printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name);
729#endif 734#endif
@@ -731,10 +736,11 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
731 736
732 /* This device doesn't support EEH, but it may have an 737 /* This device doesn't support EEH, but it may have an
733 * EEH parent, in which case we mark it as supported. */ 738 * EEH parent, in which case we mark it as supported. */
734 if (dn->parent && (dn->parent->eeh_mode & EEH_MODE_SUPPORTED)) { 739 if (dn->parent && dn->parent->data
740 && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
735 /* Parent supports EEH. */ 741 /* Parent supports EEH. */
736 dn->eeh_mode |= EEH_MODE_SUPPORTED; 742 pdn->eeh_mode |= EEH_MODE_SUPPORTED;
737 dn->eeh_config_addr = dn->parent->eeh_config_addr; 743 pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
738 return NULL; 744 return NULL;
739 } 745 }
740 } 746 }
@@ -791,11 +797,13 @@ void __init eeh_init(void)
791 for (phb = of_find_node_by_name(NULL, "pci"); phb; 797 for (phb = of_find_node_by_name(NULL, "pci"); phb;
792 phb = of_find_node_by_name(phb, "pci")) { 798 phb = of_find_node_by_name(phb, "pci")) {
793 unsigned long buid; 799 unsigned long buid;
800 struct pci_dn *pci;
794 801
795 buid = get_phb_buid(phb); 802 buid = get_phb_buid(phb);
796 if (buid == 0) 803 if (buid == 0 || phb->data == NULL)
797 continue; 804 continue;
798 805
806 pci = phb->data;
799 info.buid_lo = BUID_LO(buid); 807 info.buid_lo = BUID_LO(buid);
800 info.buid_hi = BUID_HI(buid); 808 info.buid_hi = BUID_HI(buid);
801 traverse_pci_devices(phb, early_enable_eeh, &info); 809 traverse_pci_devices(phb, early_enable_eeh, &info);
@@ -824,9 +832,9 @@ void eeh_add_device_early(struct device_node *dn)
824 struct pci_controller *phb; 832 struct pci_controller *phb;
825 struct eeh_early_enable_info info; 833 struct eeh_early_enable_info info;
826 834
827 if (!dn) 835 if (!dn || !dn->data)
828 return; 836 return;
829 phb = dn->phb; 837 phb = PCI_DN(dn)->phb;
830 if (NULL == phb || 0 == phb->buid) { 838 if (NULL == phb || 0 == phb->buid) {
831 printk(KERN_WARNING "EEH: Expected buid but found none\n"); 839 printk(KERN_WARNING "EEH: Expected buid but found none\n");
832 return; 840 return;
@@ -851,8 +859,7 @@ void eeh_add_device_late(struct pci_dev *dev)
851 return; 859 return;
852 860
853#ifdef DEBUG 861#ifdef DEBUG
854 printk(KERN_DEBUG "EEH: adding device %s %s\n", pci_name(dev), 862 printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
855 pci_pretty_name(dev));
856#endif 863#endif
857 864
858 pci_addr_cache_insert_device (dev); 865 pci_addr_cache_insert_device (dev);
@@ -873,8 +880,7 @@ void eeh_remove_device(struct pci_dev *dev)
873 880
874 /* Unregister the device with the EEH/PCI address search system */ 881 /* Unregister the device with the EEH/PCI address search system */
875#ifdef DEBUG 882#ifdef DEBUG
876 printk(KERN_DEBUG "EEH: remove device %s %s\n", pci_name(dev), 883 printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
877 pci_pretty_name(dev));
878#endif 884#endif
879 pci_addr_cache_remove_device(dev); 885 pci_addr_cache_remove_device(dev);
880} 886}
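Throughout this file the EEH state moves from fields on the device_node into the pci_dn reached through dn->data, which is why the updated paths also check dn->data before dereferencing. A trimmed-down sketch of that indirection and the mode checks; the types and flag values are assumptions, not the real definitions:

    struct pci_dn_lite {
            int eeh_mode;
            int eeh_config_addr;
    };

    struct device_node_lite {
            void *data;                     /* per-node pci_dn, may be NULL */
            struct device_node_lite *parent;
    };

    #define PCI_DN(dn)              ((struct pci_dn_lite *)(dn)->data)
    #define EEH_MODE_SUPPORTED      0x01    /* assumed flag values */
    #define EEH_MODE_NOCHECK        0x02

    static int eeh_should_check(struct device_node_lite *dn)
    {
            struct pci_dn_lite *pdn;

            if (!dn || !dn->data)
                    return 0;
            pdn = PCI_DN(dn);
            if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
                (pdn->eeh_mode & EEH_MODE_NOCHECK))
                    return 0;
            return pdn->eeh_config_addr != 0;
    }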
diff --git a/arch/ppc64/kernel/entry.S b/arch/ppc64/kernel/entry.S
index 14cd56ac40dd..e8c0bbf4d000 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/ppc64/kernel/entry.S
@@ -28,7 +28,7 @@
28#include <asm/mmu.h> 28#include <asm/mmu.h>
29#include <asm/thread_info.h> 29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h> 30#include <asm/ppc_asm.h>
31#include <asm/offsets.h> 31#include <asm/asm-offsets.h>
32#include <asm/cputable.h> 32#include <asm/cputable.h>
33 33
34#ifdef CONFIG_PPC_ISERIES 34#ifdef CONFIG_PPC_ISERIES
@@ -410,15 +410,14 @@ BEGIN_FTR_SECTION
410 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ 410 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
411 cror eq,4*cr1+eq,eq 411 cror eq,4*cr1+eq,eq
412 beq 2f /* if yes, don't slbie it */ 412 beq 2f /* if yes, don't slbie it */
413 oris r0,r6,0x0800 /* set C (class) bit */
414 413
415 /* Bolt in the new stack SLB entry */ 414 /* Bolt in the new stack SLB entry */
416 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 415 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
417 oris r6,r6,(SLB_ESID_V)@h 416 oris r0,r6,(SLB_ESID_V)@h
418 ori r6,r6,(SLB_NUM_BOLTED-1)@l 417 ori r0,r0,(SLB_NUM_BOLTED-1)@l
419 slbie r0 418 slbie r6
420 slbie r0 /* Workaround POWER5 < DD2.1 issue */ 419 slbie r6 /* Workaround POWER5 < DD2.1 issue */
421 slbmte r7,r6 420 slbmte r7,r0
422 isync 421 isync
423 422
4242: 4232:
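The reworked switch_to path above drops the class-bit oris, hands the unmodified ESID in r6 to slbie, and builds the word for slbmte (valid bit plus bolted slot index) in r0 instead. A one-function sketch of that composed word; the bit position and slot count here are assumptions rather than the architected layout:

    typedef unsigned long long u64;

    #define SLB_ESID_V      0x0000000008000000ULL   /* valid bit (assumed) */
    #define SLB_NUM_BOLTED  3                       /* assumed */

    static u64 bolted_stack_esid_word(u64 esid)
    {
            /* the raw esid stays untouched for slbie; this value feeds slbmte */
            return esid | SLB_ESID_V | (SLB_NUM_BOLTED - 1);
    }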
diff --git a/arch/ppc64/kernel/firmware.c b/arch/ppc64/kernel/firmware.c
new file mode 100644
index 000000000000..d8432c0fb27d
--- /dev/null
+++ b/arch/ppc64/kernel/firmware.c
@@ -0,0 +1,47 @@
1/*
2 * arch/ppc64/kernel/firmware.c
3 *
4 * Extracted from cputable.c
5 *
6 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
7 *
8 * Modifications for ppc64:
9 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
10 * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18#include <linux/config.h>
19
20#include <asm/firmware.h>
21
22unsigned long ppc64_firmware_features;
23
24#ifdef CONFIG_PPC_PSERIES
25firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
26 {FW_FEATURE_PFT, "hcall-pft"},
27 {FW_FEATURE_TCE, "hcall-tce"},
28 {FW_FEATURE_SPRG0, "hcall-sprg0"},
29 {FW_FEATURE_DABR, "hcall-dabr"},
30 {FW_FEATURE_COPY, "hcall-copy"},
31 {FW_FEATURE_ASR, "hcall-asr"},
32 {FW_FEATURE_DEBUG, "hcall-debug"},
33 {FW_FEATURE_PERF, "hcall-perf"},
34 {FW_FEATURE_DUMP, "hcall-dump"},
35 {FW_FEATURE_INTERRUPT, "hcall-interrupt"},
36 {FW_FEATURE_MIGRATE, "hcall-migrate"},
37 {FW_FEATURE_PERFMON, "hcall-perfmon"},
38 {FW_FEATURE_CRQ, "hcall-crq"},
39 {FW_FEATURE_VIO, "hcall-vio"},
40 {FW_FEATURE_RDMA, "hcall-rdma"},
41 {FW_FEATURE_LLAN, "hcall-lLAN"},
42 {FW_FEATURE_BULK, "hcall-bulk"},
43 {FW_FEATURE_XDABR, "hcall-xdabr"},
44 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
45 {FW_FEATURE_SPLPAR, "hcall-splpar"},
46};
47#endif
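The table in the new file pairs each FW_FEATURE_* bit with the "hcall-*" string firmware advertises, so platform setup can fold the advertised strings into ppc64_firmware_features. A rough sketch of that folding step with made-up bit values; the real code walks a firmware-provided property rather than a plain string array:

    #include <string.h>

    struct fw_feature_lite {
            unsigned long val;
            const char *name;
    };

    #define FW_FEATURE_PFT  0x0001UL        /* assumed bit values */
    #define FW_FEATURE_TCE  0x0002UL

    static struct fw_feature_lite table[] = {
            { FW_FEATURE_PFT, "hcall-pft" },
            { FW_FEATURE_TCE, "hcall-tce" },
    };

    static unsigned long fw_features_from_strings(const char *strs[], int n)
    {
            unsigned long features = 0;

            for (int i = 0; i < n; i++)
                    for (unsigned int j = 0;
                         j < sizeof(table) / sizeof(table[0]); j++)
                            if (strcmp(strs[i], table[j].name) == 0)
                                    features |= table[j].val;
            return features;
    }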
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index accaa052d31f..58c314738c99 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -23,17 +23,14 @@
23 * 2 of the License, or (at your option) any later version. 23 * 2 of the License, or (at your option) any later version.
24 */ 24 */
25 25
26#define SECONDARY_PROCESSORS
27
28#include <linux/config.h> 26#include <linux/config.h>
29#include <linux/threads.h> 27#include <linux/threads.h>
30#include <asm/processor.h> 28#include <asm/processor.h>
31#include <asm/page.h> 29#include <asm/page.h>
32#include <asm/mmu.h> 30#include <asm/mmu.h>
33#include <asm/naca.h>
34#include <asm/systemcfg.h> 31#include <asm/systemcfg.h>
35#include <asm/ppc_asm.h> 32#include <asm/ppc_asm.h>
36#include <asm/offsets.h> 33#include <asm/asm-offsets.h>
37#include <asm/bug.h> 34#include <asm/bug.h>
38#include <asm/cputable.h> 35#include <asm/cputable.h>
39#include <asm/setup.h> 36#include <asm/setup.h>
@@ -45,18 +42,13 @@
45#endif 42#endif
46 43
47/* 44/*
48 * hcall interface to pSeries LPAR
49 */
50#define H_SET_ASR 0x30
51
52/*
53 * We layout physical memory as follows: 45 * We layout physical memory as follows:
54 * 0x0000 - 0x00ff : Secondary processor spin code 46 * 0x0000 - 0x00ff : Secondary processor spin code
55 * 0x0100 - 0x2fff : pSeries Interrupt prologs 47 * 0x0100 - 0x2fff : pSeries Interrupt prologs
56 * 0x3000 - 0x3fff : Interrupt support 48 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
57 * 0x4000 - 0x4fff : NACA 49 * 0x6000 - 0x6fff : Initial (CPU0) segment table
58 * 0x6000 : iSeries and common interrupt prologs 50 * 0x7000 - 0x7fff : FWNMI data area
59 * 0x9000 - 0x9fff : Initial segment table 51 * 0x8000 - : Early init and support code
60 */ 52 */
61 53
62/* 54/*
@@ -94,6 +86,7 @@ END_FTR_SECTION(0, 1)
94 86
95 /* Catch branch to 0 in real mode */ 87 /* Catch branch to 0 in real mode */
96 trap 88 trap
89
97#ifdef CONFIG_PPC_ISERIES 90#ifdef CONFIG_PPC_ISERIES
98 /* 91 /*
99 * At offset 0x20, there is a pointer to iSeries LPAR data. 92 * At offset 0x20, there is a pointer to iSeries LPAR data.
@@ -103,12 +96,12 @@ END_FTR_SECTION(0, 1)
103 .llong hvReleaseData-KERNELBASE 96 .llong hvReleaseData-KERNELBASE
104 97
105 /* 98 /*
106 * At offset 0x28 and 0x30 are offsets to the msChunks 99 * At offset 0x28 and 0x30 are offsets to the mschunks_map
107 * array (used by the iSeries LPAR debugger to do translation 100 * array (used by the iSeries LPAR debugger to do translation
108 * between physical addresses and absolute addresses) and 101 * between physical addresses and absolute addresses) and
109 * to the pidhash table (also used by the debugger) 102 * to the pidhash table (also used by the debugger)
110 */ 103 */
111 .llong msChunks-KERNELBASE 104 .llong mschunks_map-KERNELBASE
112 .llong 0 /* pidhash-KERNELBASE SFRXXX */ 105 .llong 0 /* pidhash-KERNELBASE SFRXXX */
113 106
114 /* Offset 0x38 - Pointer to start of embedded System.map */ 107 /* Offset 0x38 - Pointer to start of embedded System.map */
@@ -120,7 +113,7 @@ embedded_sysmap_start:
120embedded_sysmap_end: 113embedded_sysmap_end:
121 .llong 0 114 .llong 0
122 115
123#else /* CONFIG_PPC_ISERIES */ 116#endif /* CONFIG_PPC_ISERIES */
124 117
125 /* Secondary processors spin on this value until it goes to 1. */ 118 /* Secondary processors spin on this value until it goes to 1. */
126 .globl __secondary_hold_spinloop 119 .globl __secondary_hold_spinloop
@@ -155,7 +148,7 @@ _GLOBAL(__secondary_hold)
155 std r24,__secondary_hold_acknowledge@l(0) 148 std r24,__secondary_hold_acknowledge@l(0)
156 sync 149 sync
157 150
158 /* All secondary cpu's wait here until told to start. */ 151 /* All secondary cpus wait here until told to start. */
159100: ld r4,__secondary_hold_spinloop@l(0) 152100: ld r4,__secondary_hold_spinloop@l(0)
160 cmpdi 0,r4,1 153 cmpdi 0,r4,1
161 bne 100b 154 bne 100b
@@ -170,7 +163,6 @@ _GLOBAL(__secondary_hold)
170 BUG_OPCODE 163 BUG_OPCODE
171#endif 164#endif
172#endif 165#endif
173#endif
174 166
175/* This value is used to mark exception frames on the stack. */ 167/* This value is used to mark exception frames on the stack. */
176 .section ".toc","aw" 168 .section ".toc","aw"
@@ -502,33 +494,37 @@ system_call_pSeries:
502 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) 494 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
503 STD_EXCEPTION_PSERIES(0x1700, altivec_assist) 495 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
504 496
497 . = 0x3000
498
499/*** pSeries interrupt support ***/
500
505 /* moved from 0xf00 */ 501 /* moved from 0xf00 */
506 STD_EXCEPTION_PSERIES(0x3000, performance_monitor) 502 STD_EXCEPTION_PSERIES(., performance_monitor)
507 503
508 . = 0x3100 504 .align 7
509_GLOBAL(do_stab_bolted_pSeries) 505_GLOBAL(do_stab_bolted_pSeries)
510 mtcrf 0x80,r12 506 mtcrf 0x80,r12
511 mfspr r12,SPRG2 507 mfspr r12,SPRG2
512 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
513 509
514 510/*
515 /* Space for the naca. Architected to be located at real address 511 * Vectors for the FWNMI option. Share common code.
516 * NACA_PHYS_ADDR. Various tools rely on this location being fixed. 512 */
517 * The first dword of the naca is required by iSeries LPAR to 513 .globl system_reset_fwnmi
518 * point to itVpdAreas. On pSeries native, this value is not used. 514system_reset_fwnmi:
519 */ 515 HMT_MEDIUM
520 . = NACA_PHYS_ADDR 516 mtspr SPRG1,r13 /* save r13 */
521 .globl __end_interrupts 517 RUNLATCH_ON(r13)
522__end_interrupts: 518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
523#ifdef CONFIG_PPC_ISERIES
524 .globl naca
525naca:
526 .llong itVpdAreas
527 .llong 0 /* xRamDisk */
528 .llong 0 /* xRamDiskSize */
529 519
530 . = 0x6100 520 .globl machine_check_fwnmi
521machine_check_fwnmi:
522 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
531 526
527#ifdef CONFIG_PPC_ISERIES
532/*** ISeries-LPAR interrupt handlers ***/ 528/*** ISeries-LPAR interrupt handlers ***/
533 529
534 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) 530 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
@@ -626,9 +622,7 @@ system_reset_iSeries:
626 622
627 cmpwi 0,r23,0 623 cmpwi 0,r23,0
628 beq iSeries_secondary_smp_loop /* Loop until told to go */ 624 beq iSeries_secondary_smp_loop /* Loop until told to go */
629#ifdef SECONDARY_PROCESSORS
630 bne .__secondary_start /* Loop until told to go */ 625 bne .__secondary_start /* Loop until told to go */
631#endif
632iSeries_secondary_smp_loop: 626iSeries_secondary_smp_loop:
633 /* Let the Hypervisor know we are alive */ 627 /* Let the Hypervisor know we are alive */
634 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ 628 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
@@ -671,51 +665,8 @@ hardware_interrupt_iSeries_masked:
671 ld r13,PACA_EXGEN+EX_R13(r13) 665 ld r13,PACA_EXGEN+EX_R13(r13)
672 rfid 666 rfid
673 b . /* prevent speculative execution */ 667 b . /* prevent speculative execution */
674#endif
675
676/*
677 * Data area reserved for FWNMI option.
678 */
679 .= 0x7000
680 .globl fwnmi_data_area
681fwnmi_data_area:
682
683#ifdef CONFIG_PPC_ISERIES
684 . = LPARMAP_PHYS
685#include "lparmap.s"
686#endif /* CONFIG_PPC_ISERIES */ 668#endif /* CONFIG_PPC_ISERIES */
687 669
688/*
689 * Vectors for the FWNMI option. Share common code.
690 */
691 . = 0x8000
692 .globl system_reset_fwnmi
693system_reset_fwnmi:
694 HMT_MEDIUM
695 mtspr SPRG1,r13 /* save r13 */
696 RUNLATCH_ON(r13)
697 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
698 .globl machine_check_fwnmi
699machine_check_fwnmi:
700 HMT_MEDIUM
701 mtspr SPRG1,r13 /* save r13 */
702 RUNLATCH_ON(r13)
703 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
704
705 /*
706 * Space for the initial segment table
707 * For LPAR, the hypervisor must fill in at least one entry
708 * before we get control (with relocate on)
709 */
710 . = STAB0_PHYS_ADDR
711 .globl __start_stab
712__start_stab:
713
714 . = (STAB0_PHYS_ADDR + PAGE_SIZE)
715 .globl __end_stab
716__end_stab:
717
718
719/*** Common interrupt handlers ***/ 670/*** Common interrupt handlers ***/
720 671
721 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) 672 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
@@ -752,8 +703,8 @@ machine_check_common:
752 * R9 contains the saved CR, r13 points to the paca, 703 * R9 contains the saved CR, r13 points to the paca,
753 * r10 contains the (bad) kernel stack pointer, 704 * r10 contains the (bad) kernel stack pointer,
754 * r11 and r12 contain the saved SRR0 and SRR1. 705 * r11 and r12 contain the saved SRR0 and SRR1.
755 * We switch to using the paca guard page as an emergency stack, 706 * We switch to using an emergency stack, save the registers there,
756 * save the registers there, and call kernel_bad_stack(), which panics. 707 * and call kernel_bad_stack(), which panics.
757 */ 708 */
758bad_stack: 709bad_stack:
759 ld r1,PACAEMERGSP(r13) 710 ld r1,PACAEMERGSP(r13)
@@ -906,6 +857,62 @@ fp_unavailable_common:
906 bl .kernel_fp_unavailable_exception 857 bl .kernel_fp_unavailable_exception
907 BUG_OPCODE 858 BUG_OPCODE
908 859
860/*
861 * load_up_fpu(unused, unused, tsk)
862 * Disable FP for the task which had the FPU previously,
863 * and save its floating-point registers in its thread_struct.
864 * Enables the FPU for use in the kernel on return.
865 * On SMP we know the fpu is free, since we give it up every
866 * switch (ie, no lazy save of the FP registers).
867 * On entry: r13 == 'current' && last_task_used_math != 'current'
868 */
869_STATIC(load_up_fpu)
870 mfmsr r5 /* grab the current MSR */
871 ori r5,r5,MSR_FP
872 mtmsrd r5 /* enable use of fpu now */
873 isync
874/*
875 * For SMP, we don't do lazy FPU switching because it just gets too
876 * horrendously complex, especially when a task switches from one CPU
877 * to another. Instead we call giveup_fpu in switch_to.
878 *
879 */
880#ifndef CONFIG_SMP
881 ld r3,last_task_used_math@got(r2)
882 ld r4,0(r3)
883 cmpdi 0,r4,0
884 beq 1f
885 /* Save FP state to last_task_used_math's THREAD struct */
886 addi r4,r4,THREAD
887 SAVE_32FPRS(0, r4)
888 mffs fr0
889 stfd fr0,THREAD_FPSCR(r4)
890 /* Disable FP for last_task_used_math */
891 ld r5,PT_REGS(r4)
892 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893 li r6,MSR_FP|MSR_FE0|MSR_FE1
894 andc r4,r4,r6
895 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8961:
897#endif /* CONFIG_SMP */
898 /* enable use of FP after return */
899 ld r4,PACACURRENT(r13)
900 addi r5,r4,THREAD /* Get THREAD */
901 ld r4,THREAD_FPEXC_MODE(r5)
902 ori r12,r12,MSR_FP
903 or r12,r12,r4
904 std r12,_MSR(r1)
905 lfd fr0,THREAD_FPSCR(r5)
906 mtfsf 0xff,fr0
907 REST_32FPRS(0, r5)
908#ifndef CONFIG_SMP
909 /* Update last_task_used_math to 'current' */
910 subi r4,r5,THREAD /* Back to 'current' */
911 std r4,0(r3)
912#endif /* CONFIG_SMP */
913 /* restore registers and return */
914 b fast_exception_return
915
909 .align 7 916 .align 7
910 .globl altivec_unavailable_common 917 .globl altivec_unavailable_common
911altivec_unavailable_common: 918altivec_unavailable_common:
@@ -921,6 +928,80 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
921 bl .altivec_unavailable_exception 928 bl .altivec_unavailable_exception
922 b .ret_from_except 929 b .ret_from_except
923 930
931#ifdef CONFIG_ALTIVEC
932/*
933 * load_up_altivec(unused, unused, tsk)
934 * Disable VMX for the task which had it previously,
935 * and save its vector registers in its thread_struct.
936 * Enables the VMX for use in the kernel on return.
937 * On SMP we know the VMX is free, since we give it up every
938 * switch (ie, no lazy save of the vector registers).
939 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
940 */
941_STATIC(load_up_altivec)
942 mfmsr r5 /* grab the current MSR */
943 oris r5,r5,MSR_VEC@h
944 mtmsrd r5 /* enable use of VMX now */
945 isync
946
947/*
948 * For SMP, we don't do lazy VMX switching because it just gets too
949 * horrendously complex, especially when a task switches from one CPU
950 * to another. Instead we call giveup_altvec in switch_to.
951 * VRSAVE isn't dealt with here, that is done in the normal context
952 * switch code. Note that we could rely on vrsave value to eventually
953 * avoid saving all of the VREGs here...
954 */
955#ifndef CONFIG_SMP
956 ld r3,last_task_used_altivec@got(r2)
957 ld r4,0(r3)
958 cmpdi 0,r4,0
959 beq 1f
960 /* Save VMX state to last_task_used_altivec's THREAD struct */
961 addi r4,r4,THREAD
962 SAVE_32VRS(0,r5,r4)
963 mfvscr vr0
964 li r10,THREAD_VSCR
965 stvx vr0,r10,r4
966 /* Disable VMX for last_task_used_altivec */
967 ld r5,PT_REGS(r4)
968 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
969 lis r6,MSR_VEC@h
970 andc r4,r4,r6
971 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9721:
973#endif /* CONFIG_SMP */
974 /* Hack: if we get an altivec unavailable trap with VRSAVE
975 * set to all zeros, we assume this is a broken application
976 * that fails to set it properly, and thus we switch it to
977 * all 1's
978 */
979 mfspr r4,SPRN_VRSAVE
980 cmpdi 0,r4,0
981 bne+ 1f
982 li r4,-1
983 mtspr SPRN_VRSAVE,r4
9841:
985 /* enable use of VMX after return */
986 ld r4,PACACURRENT(r13)
987 addi r5,r4,THREAD /* Get THREAD */
988 oris r12,r12,MSR_VEC@h
989 std r12,_MSR(r1)
990 li r4,1
991 li r10,THREAD_VSCR
992 stw r4,THREAD_USED_VR(r5)
993 lvx vr0,r10,r5
994 mtvscr vr0
995 REST_32VRS(0,r4,r5)
996#ifndef CONFIG_SMP
997 /* Update last_task_used_math to 'current' */
998 subi r4,r5,THREAD /* Back to 'current' */
999 std r4,0(r3)
1000#endif /* CONFIG_SMP */
1001 /* restore registers and return */
1002 b fast_exception_return
1003#endif /* CONFIG_ALTIVEC */
1004
924/* 1005/*
925 * Hash table stuff 1006 * Hash table stuff
926 */ 1007 */
@@ -1167,6 +1248,42 @@ unrecov_slb:
1167 bl .unrecoverable_exception 1248 bl .unrecoverable_exception
1168 b 1b 1249 b 1b
1169 1250
1251/*
1252 * Space for CPU0's segment table.
1253 *
1254 * On iSeries, the hypervisor must fill in at least one entry before
 1255 * we get control (with relocate on). The address is given to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT).
1259 */
1260 . = STAB0_PHYS_ADDR /* 0x6000 */
1261 .globl initial_stab
1262initial_stab:
1263 .space 4096
1264
1265/*
1266 * Data area reserved for FWNMI option.
1267 * This address (0x7000) is fixed by the RPA.
1268 */
1269 .= 0x7000
1270 .globl fwnmi_data_area
1271fwnmi_data_area:
1272
1273 /* iSeries does not use the FWNMI stuff, so it is safe to put
1274 * this here, even if we later allow kernels that will boot on
1275 * both pSeries and iSeries */
1276#ifdef CONFIG_PPC_ISERIES
1277 . = LPARMAP_PHYS
1278#include "lparmap.s"
1279/*
1280 * This ".text" is here for old compilers that generate a trailing
1281 * .note section when compiling .c files to .s
1282 */
1283 .text
1284#endif /* CONFIG_PPC_ISERIES */
1285
1286 . = 0x8000
1170 1287
1171/* 1288/*
1172 * On pSeries, secondary processors spin in the following code. 1289 * On pSeries, secondary processors spin in the following code.
@@ -1200,7 +1317,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1200 b .kexec_wait /* next kernel might do better */ 1317 b .kexec_wait /* next kernel might do better */
1201 1318
12022: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1203 /* From now on, r24 is expected to be logica cpuid */ 1320 /* From now on, r24 is expected to be logical cpuid */
1204 mr r24,r5 1321 mr r24,r5
12053: HMT_LOW 13223: HMT_LOW
1206 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ 1323 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
@@ -1213,10 +1330,8 @@ _GLOBAL(pSeries_secondary_smp_init)
1213 1330
1214 cmpwi 0,r23,0 1331 cmpwi 0,r23,0
1215#ifdef CONFIG_SMP 1332#ifdef CONFIG_SMP
1216#ifdef SECONDARY_PROCESSORS
1217 bne .__secondary_start 1333 bne .__secondary_start
1218#endif 1334#endif
1219#endif
1220 b 3b /* Loop until told to go */ 1335 b 3b /* Loop until told to go */
1221 1336
1222#ifdef CONFIG_PPC_ISERIES 1337#ifdef CONFIG_PPC_ISERIES
@@ -1430,228 +1545,6 @@ _GLOBAL(copy_and_flush)
1430.align 8 1545.align 8
1431copy_to_here: 1546copy_to_here:
1432 1547
1433/*
1434 * load_up_fpu(unused, unused, tsk)
1435 * Disable FP for the task which had the FPU previously,
1436 * and save its floating-point registers in its thread_struct.
1437 * Enables the FPU for use in the kernel on return.
1438 * On SMP we know the fpu is free, since we give it up every
1439 * switch (ie, no lazy save of the FP registers).
1440 * On entry: r13 == 'current' && last_task_used_math != 'current'
1441 */
1442_STATIC(load_up_fpu)
1443 mfmsr r5 /* grab the current MSR */
1444 ori r5,r5,MSR_FP
1445 mtmsrd r5 /* enable use of fpu now */
1446 isync
1447/*
1448 * For SMP, we don't do lazy FPU switching because it just gets too
1449 * horrendously complex, especially when a task switches from one CPU
1450 * to another. Instead we call giveup_fpu in switch_to.
1451 *
1452 */
1453#ifndef CONFIG_SMP
1454 ld r3,last_task_used_math@got(r2)
1455 ld r4,0(r3)
1456 cmpdi 0,r4,0
1457 beq 1f
1458 /* Save FP state to last_task_used_math's THREAD struct */
1459 addi r4,r4,THREAD
1460 SAVE_32FPRS(0, r4)
1461 mffs fr0
1462 stfd fr0,THREAD_FPSCR(r4)
1463 /* Disable FP for last_task_used_math */
1464 ld r5,PT_REGS(r4)
1465 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1466 li r6,MSR_FP|MSR_FE0|MSR_FE1
1467 andc r4,r4,r6
1468 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
14691:
1470#endif /* CONFIG_SMP */
1471 /* enable use of FP after return */
1472 ld r4,PACACURRENT(r13)
1473 addi r5,r4,THREAD /* Get THREAD */
1474 ld r4,THREAD_FPEXC_MODE(r5)
1475 ori r12,r12,MSR_FP
1476 or r12,r12,r4
1477 std r12,_MSR(r1)
1478 lfd fr0,THREAD_FPSCR(r5)
1479 mtfsf 0xff,fr0
1480 REST_32FPRS(0, r5)
1481#ifndef CONFIG_SMP
1482 /* Update last_task_used_math to 'current' */
1483 subi r4,r5,THREAD /* Back to 'current' */
1484 std r4,0(r3)
1485#endif /* CONFIG_SMP */
1486 /* restore registers and return */
1487 b fast_exception_return
1488
1489/*
1490 * disable_kernel_fp()
1491 * Disable the FPU.
1492 */
1493_GLOBAL(disable_kernel_fp)
1494 mfmsr r3
1495 rldicl r0,r3,(63-MSR_FP_LG),1
1496 rldicl r3,r0,(MSR_FP_LG+1),0
1497 mtmsrd r3 /* disable use of fpu now */
1498 isync
1499 blr
1500
1501/*
1502 * giveup_fpu(tsk)
1503 * Disable FP for the task given as the argument,
1504 * and save the floating-point registers in its thread_struct.
1505 * Enables the FPU for use in the kernel on return.
1506 */
1507_GLOBAL(giveup_fpu)
1508 mfmsr r5
1509 ori r5,r5,MSR_FP
1510 mtmsrd r5 /* enable use of fpu now */
1511 isync
1512 cmpdi 0,r3,0
1513 beqlr- /* if no previous owner, done */
1514 addi r3,r3,THREAD /* want THREAD of task */
1515 ld r5,PT_REGS(r3)
1516 cmpdi 0,r5,0
1517 SAVE_32FPRS(0, r3)
1518 mffs fr0
1519 stfd fr0,THREAD_FPSCR(r3)
1520 beq 1f
1521 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1522 li r3,MSR_FP|MSR_FE0|MSR_FE1
1523 andc r4,r4,r3 /* disable FP for previous task */
1524 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15251:
1526#ifndef CONFIG_SMP
1527 li r5,0
1528 ld r4,last_task_used_math@got(r2)
1529 std r5,0(r4)
1530#endif /* CONFIG_SMP */
1531 blr
1532
1533
1534#ifdef CONFIG_ALTIVEC
1535
1536/*
1537 * load_up_altivec(unused, unused, tsk)
1538 * Disable VMX for the task which had it previously,
1539 * and save its vector registers in its thread_struct.
1540 * Enables the VMX for use in the kernel on return.
1541 * On SMP we know the VMX is free, since we give it up every
1542 * switch (ie, no lazy save of the vector registers).
1543 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
1544 */
1545_STATIC(load_up_altivec)
1546 mfmsr r5 /* grab the current MSR */
1547 oris r5,r5,MSR_VEC@h
1548 mtmsrd r5 /* enable use of VMX now */
1549 isync
1550
1551/*
1552 * For SMP, we don't do lazy VMX switching because it just gets too
1553 * horrendously complex, especially when a task switches from one CPU
1554 * to another. Instead we call giveup_altvec in switch_to.
1555 * VRSAVE isn't dealt with here, that is done in the normal context
1556 * switch code. Note that we could rely on vrsave value to eventually
1557 * avoid saving all of the VREGs here...
1558 */
1559#ifndef CONFIG_SMP
1560 ld r3,last_task_used_altivec@got(r2)
1561 ld r4,0(r3)
1562 cmpdi 0,r4,0
1563 beq 1f
1564 /* Save VMX state to last_task_used_altivec's THREAD struct */
1565 addi r4,r4,THREAD
1566 SAVE_32VRS(0,r5,r4)
1567 mfvscr vr0
1568 li r10,THREAD_VSCR
1569 stvx vr0,r10,r4
1570 /* Disable VMX for last_task_used_altivec */
1571 ld r5,PT_REGS(r4)
1572 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1573 lis r6,MSR_VEC@h
1574 andc r4,r4,r6
1575 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
15761:
1577#endif /* CONFIG_SMP */
1578 /* Hack: if we get an altivec unavailable trap with VRSAVE
1579 * set to all zeros, we assume this is a broken application
1580 * that fails to set it properly, and thus we switch it to
1581 * all 1's
1582 */
1583 mfspr r4,SPRN_VRSAVE
1584 cmpdi 0,r4,0
1585 bne+ 1f
1586 li r4,-1
1587 mtspr SPRN_VRSAVE,r4
15881:
1589 /* enable use of VMX after return */
1590 ld r4,PACACURRENT(r13)
1591 addi r5,r4,THREAD /* Get THREAD */
1592 oris r12,r12,MSR_VEC@h
1593 std r12,_MSR(r1)
1594 li r4,1
1595 li r10,THREAD_VSCR
1596 stw r4,THREAD_USED_VR(r5)
1597 lvx vr0,r10,r5
1598 mtvscr vr0
1599 REST_32VRS(0,r4,r5)
1600#ifndef CONFIG_SMP
1601 /* Update last_task_used_math to 'current' */
1602 subi r4,r5,THREAD /* Back to 'current' */
1603 std r4,0(r3)
1604#endif /* CONFIG_SMP */
1605 /* restore registers and return */
1606 b fast_exception_return
1607
1608/*
1609 * disable_kernel_altivec()
1610 * Disable the VMX.
1611 */
1612_GLOBAL(disable_kernel_altivec)
1613 mfmsr r3
1614 rldicl r0,r3,(63-MSR_VEC_LG),1
1615 rldicl r3,r0,(MSR_VEC_LG+1),0
1616 mtmsrd r3 /* disable use of VMX now */
1617 isync
1618 blr
1619
1620/*
1621 * giveup_altivec(tsk)
1622 * Disable VMX for the task given as the argument,
1623 * and save the vector registers in its thread_struct.
1624 * Enables the VMX for use in the kernel on return.
1625 */
1626_GLOBAL(giveup_altivec)
1627 mfmsr r5
1628 oris r5,r5,MSR_VEC@h
1629 mtmsrd r5 /* enable use of VMX now */
1630 isync
1631 cmpdi 0,r3,0
1632 beqlr- /* if no previous owner, done */
1633 addi r3,r3,THREAD /* want THREAD of task */
1634 ld r5,PT_REGS(r3)
1635 cmpdi 0,r5,0
1636 SAVE_32VRS(0,r4,r3)
1637 mfvscr vr0
1638 li r4,THREAD_VSCR
1639 stvx vr0,r4,r3
1640 beq 1f
1641 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1642 lis r3,MSR_VEC@h
1643 andc r4,r4,r3 /* disable FP for previous task */
1644 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
16451:
1646#ifndef CONFIG_SMP
1647 li r5,0
1648 ld r4,last_task_used_altivec@got(r2)
1649 std r5,0(r4)
1650#endif /* CONFIG_SMP */
1651 blr
1652
1653#endif /* CONFIG_ALTIVEC */
1654
1655#ifdef CONFIG_SMP 1548#ifdef CONFIG_SMP
1656#ifdef CONFIG_PPC_PMAC 1549#ifdef CONFIG_PPC_PMAC
1657/* 1550/*
@@ -1753,8 +1646,9 @@ _GLOBAL(__secondary_start)
1753#else 1646#else
1754 /* set the ASR */ 1647 /* set the ASR */
1755 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1648 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1649 ld r3,0(r3)
1756 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1650 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1757 cmpldi r3,PLATFORM_PSERIES_LPAR 1651 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1758 bne 98f 1652 bne 98f
1759 mfspr r3,PVR 1653 mfspr r3,PVR
1760 srwi r3,r3,16 1654 srwi r3,r3,16
@@ -1916,8 +1810,9 @@ _STATIC(start_here_multiplatform)
1916 ld r3,PACASTABREAL(r13) 1810 ld r3,PACASTABREAL(r13)
1917 ori r4,r3,1 /* turn on valid bit */ 1811 ori r4,r3,1 /* turn on valid bit */
1918 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1812 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1813 ld r3,0(r3)
1919 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1814 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1920 cmpldi r3,PLATFORM_PSERIES_LPAR 1815 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1921 bne 98f 1816 bne 98f
1922 mfspr r3,PVR 1817 mfspr r3,PVR
1923 srwi r3,r3,16 1818 srwi r3,r3,16
@@ -1935,9 +1830,10 @@ _STATIC(start_here_multiplatform)
193599: 183099:
1936 /* Set SDR1 (hash table pointer) */ 1831 /* Set SDR1 (hash table pointer) */
1937 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ 1832 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1833 ld r3,0(r3)
1938 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1834 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1939 /* Test if bit 0 is set (LPAR bit) */ 1835 /* Test if bit 0 is set (LPAR bit) */
1940 andi. r3,r3,0x1 1836 andi. r3,r3,PLATFORM_LPAR
1941 bne 98f 1837 bne 98f
1942 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ 1838 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1943 sub r6,r6,r26 1839 sub r6,r6,r26
@@ -2002,9 +1898,6 @@ _STATIC(start_here_common)
2002 1898
2003 bl .start_kernel 1899 bl .start_kernel
2004 1900
2005_GLOBAL(__setup_cpu_power3)
2006 blr
2007
2008_GLOBAL(hmt_init) 1901_GLOBAL(hmt_init)
2009#ifdef CONFIG_HMT 1902#ifdef CONFIG_HMT
2010 LOADADDR(r5, hmt_thread_data) 1903 LOADADDR(r5, hmt_thread_data)
@@ -2095,20 +1988,19 @@ _GLOBAL(smp_release_cpus)
2095 1988
2096/* 1989/*
2097 * We put a few things here that have to be page-aligned. 1990 * We put a few things here that have to be page-aligned.
2098 * This stuff goes at the beginning of the data segment, 1991 * This stuff goes at the beginning of the bss, which is page-aligned.
2099 * which is page-aligned.
2100 */ 1992 */
2101 .data 1993 .section ".bss"
1994
2102 .align 12 1995 .align 12
2103 .globl sdata 1996
2104sdata:
2105 .globl empty_zero_page 1997 .globl empty_zero_page
2106empty_zero_page: 1998empty_zero_page:
2107 .space 4096 1999 .space PAGE_SIZE
2108 2000
2109 .globl swapper_pg_dir 2001 .globl swapper_pg_dir
2110swapper_pg_dir: 2002swapper_pg_dir:
2111 .space 4096 2003 .space PAGE_SIZE
2112 2004
2113/* 2005/*
2114 * This space gets a copy of optional info passed to us by the bootstrap 2006 * This space gets a copy of optional info passed to us by the bootstrap
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/ppc64/kernel/iSeries_VpdInfo.c
index d11c732daf81..5d921792571f 100644
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ b/arch/ppc64/kernel/iSeries_VpdInfo.c
@@ -264,8 +264,5 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
264 printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ", 264 printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ",
265 count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor, 265 count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor,
266 frame, card); 266 frame, card);
267 if (pci_class_name(PciDev->class >> 8) == 0) 267 printk("0x%04X\n", (int)(PciDev->class >> 8));
268 printk("0x%04X\n", (int)(PciDev->class >> 8));
269 else
270 printk("%s\n", pci_class_name(PciDev->class >> 8));
271} 268}
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
index b0250ae4a72a..2192055a90a0 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/ppc64/kernel/iSeries_htab.c
@@ -41,6 +41,7 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
41 unsigned long prpn, unsigned long vflags, 41 unsigned long prpn, unsigned long vflags,
42 unsigned long rflags) 42 unsigned long rflags)
43{ 43{
44 unsigned long arpn;
44 long slot; 45 long slot;
45 hpte_t lhpte; 46 hpte_t lhpte;
46 int secondary = 0; 47 int secondary = 0;
@@ -70,8 +71,10 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
70 slot &= 0x7fffffffffffffff; 71 slot &= 0x7fffffffffffffff;
71 } 72 }
72 73
74 arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
75
73 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 76 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
74 lhpte.r = (physRpn_to_absRpn(prpn) << HPTE_R_RPN_SHIFT) | rflags; 77 lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
75 78
76 /* Now fill in the actual HPTE */ 79 /* Now fill in the actual HPTE */
77 HvCallHpt_addValidate(slot, secondary, &lhpte); 80 HvCallHpt_addValidate(slot, secondary, &lhpte);
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/ppc64/kernel/iSeries_pci.c
index 356e4fd9a94f..fbc273c32bcc 100644
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ b/arch/ppc64/kernel/iSeries_pci.c
@@ -252,7 +252,7 @@ unsigned long __init find_and_init_phbs(void)
252 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), GFP_KERNEL); 252 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
253 if (phb == NULL) 253 if (phb == NULL)
254 return -ENOMEM; 254 return -ENOMEM;
255 pci_setup_pci_controller(phb); 255 pci_setup_pci_controller(phb);
256 256
257 phb->pci_mem_offset = phb->local_number = bus; 257 phb->pci_mem_offset = phb->local_number = bus;
258 phb->first_busno = bus; 258 phb->first_busno = bus;
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index a649edbb23b6..3ffefbbc6623 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -39,6 +39,7 @@
39#include <asm/cputable.h> 39#include <asm/cputable.h>
40#include <asm/sections.h> 40#include <asm/sections.h>
41#include <asm/iommu.h> 41#include <asm/iommu.h>
42#include <asm/firmware.h>
42 43
43#include <asm/time.h> 44#include <asm/time.h>
44#include "iSeries_setup.h" 45#include "iSeries_setup.h"
@@ -314,6 +315,8 @@ static void __init iSeries_init_early(void)
314 315
315 DBG(" -> iSeries_init_early()\n"); 316 DBG(" -> iSeries_init_early()\n");
316 317
318 ppc64_firmware_features = FW_FEATURE_ISERIES;
319
317 ppcdbg_initialize(); 320 ppcdbg_initialize();
318 321
319#if defined(CONFIG_BLK_DEV_INITRD) 322#if defined(CONFIG_BLK_DEV_INITRD)
@@ -412,6 +415,22 @@ static void __init iSeries_init_early(void)
412 DBG(" <- iSeries_init_early()\n"); 415 DBG(" <- iSeries_init_early()\n");
413} 416}
414 417
418struct mschunks_map mschunks_map = {
419 /* XXX We don't use these, but Piranha might need them. */
420 .chunk_size = MSCHUNKS_CHUNK_SIZE,
421 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
422 .chunk_mask = MSCHUNKS_OFFSET_MASK,
423};
424EXPORT_SYMBOL(mschunks_map);
425
426void mschunks_alloc(unsigned long num_chunks)
427{
428 klimit = _ALIGN(klimit, sizeof(u32));
429 mschunks_map.mapping = (u32 *)klimit;
430 klimit += num_chunks * sizeof(u32);
431 mschunks_map.num_chunks = num_chunks;
432}
433
415/* 434/*
416 * The iSeries may have very large memories ( > 128 GB ) and a partition 435 * The iSeries may have very large memories ( > 128 GB ) and a partition
417 * may get memory in "chunks" that may be anywhere in the 2**52 real 436 * may get memory in "chunks" that may be anywhere in the 2**52 real
@@ -449,7 +468,7 @@ static void __init build_iSeries_Memory_Map(void)
449 468
450 /* Chunk size on iSeries is 256K bytes */ 469 /* Chunk size on iSeries is 256K bytes */
451 totalChunks = (u32)HvLpConfig_getMsChunks(); 470 totalChunks = (u32)HvLpConfig_getMsChunks();
452 klimit = msChunks_alloc(klimit, totalChunks, 1UL << 18); 471 mschunks_alloc(totalChunks);
453 472
454 /* 473 /*
455 * Get absolute address of our load area 474 * Get absolute address of our load area
@@ -486,7 +505,7 @@ static void __init build_iSeries_Memory_Map(void)
486 printk("Load area size %dK\n", loadAreaSize * 256); 505 printk("Load area size %dK\n", loadAreaSize * 256);
487 506
488 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) 507 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
489 msChunks.abs[nextPhysChunk] = 508 mschunks_map.mapping[nextPhysChunk] =
490 loadAreaFirstChunk + nextPhysChunk; 509 loadAreaFirstChunk + nextPhysChunk;
491 510
492 /* 511 /*
@@ -495,7 +514,7 @@ static void __init build_iSeries_Memory_Map(void)
495 */ 514 */
496 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); 515 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
497 hptSizePages = (u32)HvCallHpt_getHptPages(); 516 hptSizePages = (u32)HvCallHpt_getHptPages();
498 hptSizeChunks = hptSizePages >> (msChunks.chunk_shift - PAGE_SHIFT); 517 hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
499 hptLastChunk = hptFirstChunk + hptSizeChunks - 1; 518 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
500 519
501 printk("HPT absolute addr = %016lx, size = %dK\n", 520 printk("HPT absolute addr = %016lx, size = %dK\n",
@@ -552,7 +571,8 @@ static void __init build_iSeries_Memory_Map(void)
552 (absChunk > hptLastChunk)) && 571 (absChunk > hptLastChunk)) &&
553 ((absChunk < loadAreaFirstChunk) || 572 ((absChunk < loadAreaFirstChunk) ||
554 (absChunk > loadAreaLastChunk))) { 573 (absChunk > loadAreaLastChunk))) {
555 msChunks.abs[nextPhysChunk] = absChunk; 574 mschunks_map.mapping[nextPhysChunk] =
575 absChunk;
556 ++nextPhysChunk; 576 ++nextPhysChunk;
557 } 577 }
558 } 578 }
@@ -944,6 +964,8 @@ void __init iSeries_early_setup(void)
944 ppc_md.calibrate_decr = iSeries_calibrate_decr; 964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
945 ppc_md.progress = iSeries_progress; 965 ppc_md.progress = iSeries_progress;
946 966
967 /* XXX Implement enable_pmcs for iSeries */
968
947 if (get_paca()->lppaca.shared_proc) { 969 if (get_paca()->lppaca.shared_proc) {
948 ppc_md.idle_loop = iseries_shared_idle; 970 ppc_md.idle_loop = iseries_shared_idle;
949 printk(KERN_INFO "Using shared processor idle loop\n"); 971 printk(KERN_INFO "Using shared processor idle loop\n");
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/ppc64/kernel/iSeries_vio.c
new file mode 100644
index 000000000000..6b754b0c8344
--- /dev/null
+++ b/arch/ppc64/kernel/iSeries_vio.c
@@ -0,0 +1,155 @@
1/*
2 * IBM PowerPC iSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2005 Stephen Rothwell, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/types.h>
12#include <linux/device.h>
13#include <linux/init.h>
14
15#include <asm/vio.h>
16#include <asm/iommu.h>
17#include <asm/abs_addr.h>
18#include <asm/page.h>
19#include <asm/iSeries/vio.h>
20#include <asm/iSeries/HvTypes.h>
21#include <asm/iSeries/HvLpConfig.h>
22#include <asm/iSeries/HvCallXm.h>
23
24struct device *iSeries_vio_dev = &vio_bus_device.dev;
25EXPORT_SYMBOL(iSeries_vio_dev);
26
27static struct iommu_table veth_iommu_table;
28static struct iommu_table vio_iommu_table;
29
30static void __init iommu_vio_init(void)
31{
32 struct iommu_table *t;
33 struct iommu_table_cb cb;
34 unsigned long cbp;
35 unsigned long itc_entries;
36
37 cb.itc_busno = 255; /* Bus 255 is the virtual bus */
38 cb.itc_virtbus = 0xff; /* Ask for virtual bus */
39
40 cbp = virt_to_abs(&cb);
41 HvCallXm_getTceTableParms(cbp);
42
43 itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
44 veth_iommu_table.it_size = itc_entries / 2;
45 veth_iommu_table.it_busno = cb.itc_busno;
46 veth_iommu_table.it_offset = cb.itc_offset;
47 veth_iommu_table.it_index = cb.itc_index;
48 veth_iommu_table.it_type = TCE_VB;
49 veth_iommu_table.it_blocksize = 1;
50
51 t = iommu_init_table(&veth_iommu_table);
52
53 if (!t)
54 printk("Virtual Bus VETH TCE table failed.\n");
55
56 vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
57 vio_iommu_table.it_busno = cb.itc_busno;
58 vio_iommu_table.it_offset = cb.itc_offset +
59 veth_iommu_table.it_size;
60 vio_iommu_table.it_index = cb.itc_index;
61 vio_iommu_table.it_type = TCE_VB;
62 vio_iommu_table.it_blocksize = 1;
63
64 t = iommu_init_table(&vio_iommu_table);
65
66 if (!t)
67 printk("Virtual Bus VIO TCE table failed.\n");
68}
69
70/**
71 * vio_register_device_iseries: - Register a new iSeries vio device.
72 * @voidev: The device to register.
73 */
74static struct vio_dev *__init vio_register_device_iseries(char *type,
75 uint32_t unit_num)
76{
77 struct vio_dev *viodev;
78
79 /* allocate a vio_dev for this device */
80 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
81 if (!viodev)
82 return NULL;
83 memset(viodev, 0, sizeof(struct vio_dev));
84
85 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
86
87 viodev->name = viodev->dev.bus_id;
88 viodev->type = type;
89 viodev->unit_address = unit_num;
90 viodev->iommu_table = &vio_iommu_table;
91 if (vio_register_device(viodev) == NULL) {
92 kfree(viodev);
93 return NULL;
94 }
95 return viodev;
96}
97
98void __init probe_bus_iseries(void)
99{
100 HvLpIndexMap vlan_map;
101 struct vio_dev *viodev;
102 int i;
103
104 /* there is only one of each of these */
105 vio_register_device_iseries("viocons", 0);
106 vio_register_device_iseries("vscsi", 0);
107
108 vlan_map = HvLpConfig_getVirtualLanIndexMap();
109 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
110 if ((vlan_map & (0x8000 >> i)) == 0)
111 continue;
112 viodev = vio_register_device_iseries("vlan", i);
113 /* veth is special and has its own iommu_table */
114 viodev->iommu_table = &veth_iommu_table;
115 }
116 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
117 vio_register_device_iseries("viodasd", i);
118 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
119 vio_register_device_iseries("viocd", i);
120 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
121 vio_register_device_iseries("viotape", i);
122}
123
124/**
125 * vio_match_device_iseries: - Tell if an iSeries VIO device matches a
126 * vio_device_id
127 */
128static int vio_match_device_iseries(const struct vio_device_id *id,
129 const struct vio_dev *dev)
130{
131 return strncmp(dev->type, id->type, strlen(id->type)) == 0;
132}
133
134static struct vio_bus_ops vio_bus_ops_iseries = {
135 .match = vio_match_device_iseries,
136};
137
138/**
139 * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus
140 */
141static int __init vio_bus_init_iseries(void)
142{
143 int err;
144
145 err = vio_bus_init(&vio_bus_ops_iseries);
146 if (err == 0) {
147 iommu_vio_init();
148 vio_bus_device.iommu_table = &vio_iommu_table;
149 iSeries_vio_dev = &vio_bus_device.dev;
150 probe_bus_iseries();
151 }
152 return err;
153}
154
155__initcall(vio_bus_init_iseries);
diff --git a/arch/ppc64/kernel/idle_power4.S b/arch/ppc64/kernel/idle_power4.S
index 97e4a2655040..ca02afe2a795 100644
--- a/arch/ppc64/kernel/idle_power4.S
+++ b/arch/ppc64/kernel/idle_power4.S
@@ -20,7 +20,7 @@
20#include <asm/cputable.h> 20#include <asm/cputable.h>
21#include <asm/thread_info.h> 21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h> 22#include <asm/ppc_asm.h>
23#include <asm/offsets.h> 23#include <asm/asm-offsets.h>
24 24
25#undef DEBUG 25#undef DEBUG
26 26
diff --git a/arch/ppc64/kernel/iomap.c b/arch/ppc64/kernel/iomap.c
index 153cc8b0f136..6160c8dbb7c5 100644
--- a/arch/ppc64/kernel/iomap.c
+++ b/arch/ppc64/kernel/iomap.c
@@ -22,13 +22,23 @@ unsigned int fastcall ioread16(void __iomem *addr)
22{ 22{
23 return readw(addr); 23 return readw(addr);
24} 24}
25unsigned int fastcall ioread16be(void __iomem *addr)
26{
27 return in_be16(addr);
28}
25unsigned int fastcall ioread32(void __iomem *addr) 29unsigned int fastcall ioread32(void __iomem *addr)
26{ 30{
27 return readl(addr); 31 return readl(addr);
28} 32}
33unsigned int fastcall ioread32be(void __iomem *addr)
34{
35 return in_be32(addr);
36}
29EXPORT_SYMBOL(ioread8); 37EXPORT_SYMBOL(ioread8);
30EXPORT_SYMBOL(ioread16); 38EXPORT_SYMBOL(ioread16);
39EXPORT_SYMBOL(ioread16be);
31EXPORT_SYMBOL(ioread32); 40EXPORT_SYMBOL(ioread32);
41EXPORT_SYMBOL(ioread32be);
32 42
33void fastcall iowrite8(u8 val, void __iomem *addr) 43void fastcall iowrite8(u8 val, void __iomem *addr)
34{ 44{
@@ -38,13 +48,23 @@ void fastcall iowrite16(u16 val, void __iomem *addr)
38{ 48{
39 writew(val, addr); 49 writew(val, addr);
40} 50}
51void fastcall iowrite16be(u16 val, void __iomem *addr)
52{
53 out_be16(addr, val);
54}
41void fastcall iowrite32(u32 val, void __iomem *addr) 55void fastcall iowrite32(u32 val, void __iomem *addr)
42{ 56{
43 writel(val, addr); 57 writel(val, addr);
44} 58}
59void fastcall iowrite32be(u32 val, void __iomem *addr)
60{
61 out_be32(addr, val);
62}
45EXPORT_SYMBOL(iowrite8); 63EXPORT_SYMBOL(iowrite8);
46EXPORT_SYMBOL(iowrite16); 64EXPORT_SYMBOL(iowrite16);
65EXPORT_SYMBOL(iowrite16be);
47EXPORT_SYMBOL(iowrite32); 66EXPORT_SYMBOL(iowrite32);
67EXPORT_SYMBOL(iowrite32be);
48 68
49/* 69/*
50 * These are the "repeat read/write" functions. Note the 70 * These are the "repeat read/write" functions. Note the
@@ -56,15 +76,15 @@ EXPORT_SYMBOL(iowrite32);
56 */ 76 */
57void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 77void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
58{ 78{
59 _insb((u8 __force *) addr, dst, count); 79 _insb((u8 __iomem *) addr, dst, count);
60} 80}
61void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) 81void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
62{ 82{
63 _insw_ns((u16 __force *) addr, dst, count); 83 _insw_ns((u16 __iomem *) addr, dst, count);
64} 84}
65void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) 85void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
66{ 86{
67 _insl_ns((u32 __force *) addr, dst, count); 87 _insl_ns((u32 __iomem *) addr, dst, count);
68} 88}
69EXPORT_SYMBOL(ioread8_rep); 89EXPORT_SYMBOL(ioread8_rep);
70EXPORT_SYMBOL(ioread16_rep); 90EXPORT_SYMBOL(ioread16_rep);
@@ -72,15 +92,15 @@ EXPORT_SYMBOL(ioread32_rep);
72 92
73void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) 93void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
74{ 94{
75 _outsb((u8 __force *) addr, src, count); 95 _outsb((u8 __iomem *) addr, src, count);
76} 96}
77void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) 97void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
78{ 98{
79 _outsw_ns((u16 __force *) addr, src, count); 99 _outsw_ns((u16 __iomem *) addr, src, count);
80} 100}
81void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) 101void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
82{ 102{
83 _outsl_ns((u32 __force *) addr, src, count); 103 _outsl_ns((u32 __iomem *) addr, src, count);
84} 104}
85EXPORT_SYMBOL(iowrite8_rep); 105EXPORT_SYMBOL(iowrite8_rep);
86EXPORT_SYMBOL(iowrite16_rep); 106EXPORT_SYMBOL(iowrite16_rep);
diff --git a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c
index 845eebd1e28d..9032b6bfe036 100644
--- a/arch/ppc64/kernel/iommu.c
+++ b/arch/ppc64/kernel/iommu.c
@@ -438,7 +438,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
438 438
439void iommu_free_table(struct device_node *dn) 439void iommu_free_table(struct device_node *dn)
440{ 440{
441 struct iommu_table *tbl = dn->iommu_table; 441 struct pci_dn *pdn = dn->data;
442 struct iommu_table *tbl = pdn->iommu_table;
442 unsigned long bitmap_sz, i; 443 unsigned long bitmap_sz, i;
443 unsigned int order; 444 unsigned int order;
444 445
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index a3d519518fb8..7e80d49c589a 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -44,7 +44,7 @@ static struct kprobe *kprobe_prev;
44static unsigned long kprobe_status_prev, kprobe_saved_msr_prev; 44static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
45static struct pt_regs jprobe_saved_regs; 45static struct pt_regs jprobe_saved_regs;
46 46
47int arch_prepare_kprobe(struct kprobe *p) 47int __kprobes arch_prepare_kprobe(struct kprobe *p)
48{ 48{
49 int ret = 0; 49 int ret = 0;
50 kprobe_opcode_t insn = *p->addr; 50 kprobe_opcode_t insn = *p->addr;
@@ -68,27 +68,27 @@ int arch_prepare_kprobe(struct kprobe *p)
68 return ret; 68 return ret;
69} 69}
70 70
71void arch_copy_kprobe(struct kprobe *p) 71void __kprobes arch_copy_kprobe(struct kprobe *p)
72{ 72{
73 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 73 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
74 p->opcode = *p->addr; 74 p->opcode = *p->addr;
75} 75}
76 76
77void arch_arm_kprobe(struct kprobe *p) 77void __kprobes arch_arm_kprobe(struct kprobe *p)
78{ 78{
79 *p->addr = BREAKPOINT_INSTRUCTION; 79 *p->addr = BREAKPOINT_INSTRUCTION;
80 flush_icache_range((unsigned long) p->addr, 80 flush_icache_range((unsigned long) p->addr,
81 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 81 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
82} 82}
83 83
84void arch_disarm_kprobe(struct kprobe *p) 84void __kprobes arch_disarm_kprobe(struct kprobe *p)
85{ 85{
86 *p->addr = p->opcode; 86 *p->addr = p->opcode;
87 flush_icache_range((unsigned long) p->addr, 87 flush_icache_range((unsigned long) p->addr,
88 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 88 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
89} 89}
90 90
91void arch_remove_kprobe(struct kprobe *p) 91void __kprobes arch_remove_kprobe(struct kprobe *p)
92{ 92{
93 up(&kprobe_mutex); 93 up(&kprobe_mutex);
94 free_insn_slot(p->ainsn.insn); 94 free_insn_slot(p->ainsn.insn);
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
102 regs->msr |= MSR_SE; 102 regs->msr |= MSR_SE;
103 103
104 /* single step inline if it is a trap variant */ 104 /* single step inline if it is a trap variant */
105 if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn)) 105 if (is_trap(insn))
106 regs->nip = (unsigned long)p->addr; 106 regs->nip = (unsigned long)p->addr;
107 else 107 else
108 regs->nip = (unsigned long)p->ainsn.insn; 108 regs->nip = (unsigned long)p->ainsn.insn;
@@ -122,7 +122,8 @@ static inline void restore_previous_kprobe(void)
122 kprobe_saved_msr = kprobe_saved_msr_prev; 122 kprobe_saved_msr = kprobe_saved_msr_prev;
123} 123}
124 124
125void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) 125void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
126 struct pt_regs *regs)
126{ 127{
127 struct kretprobe_instance *ri; 128 struct kretprobe_instance *ri;
128 129
@@ -151,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
151 Disarm the probe we just hit, and ignore it. */ 152 Disarm the probe we just hit, and ignore it. */
152 p = get_kprobe(addr); 153 p = get_kprobe(addr);
153 if (p) { 154 if (p) {
154 if (kprobe_status == KPROBE_HIT_SS) { 155 kprobe_opcode_t insn = *p->ainsn.insn;
156 if (kprobe_status == KPROBE_HIT_SS &&
157 is_trap(insn)) {
155 regs->msr &= ~MSR_SE; 158 regs->msr &= ~MSR_SE;
156 regs->msr |= kprobe_saved_msr; 159 regs->msr |= kprobe_saved_msr;
157 unlock_kprobes(); 160 unlock_kprobes();
@@ -191,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
191 * trap variant, it could belong to someone else 194 * trap variant, it could belong to someone else
192 */ 195 */
193 kprobe_opcode_t cur_insn = *addr; 196 kprobe_opcode_t cur_insn = *addr;
194 if (IS_TW(cur_insn) || IS_TD(cur_insn) || 197 if (is_trap(cur_insn))
195 IS_TWI(cur_insn) || IS_TDI(cur_insn))
196 goto no_kprobe; 198 goto no_kprobe;
197 /* 199 /*
198 * The breakpoint instruction was removed right 200 * The breakpoint instruction was removed right
@@ -244,7 +246,7 @@ void kretprobe_trampoline_holder(void)
244/* 246/*
245 * Called when the probe at kretprobe trampoline is hit 247 * Called when the probe at kretprobe trampoline is hit
246 */ 248 */
247int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 249int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
248{ 250{
249 struct kretprobe_instance *ri = NULL; 251 struct kretprobe_instance *ri = NULL;
250 struct hlist_head *head; 252 struct hlist_head *head;
@@ -308,7 +310,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
308 * single-stepped a copy of the instruction. The address of this 310 * single-stepped a copy of the instruction. The address of this
309 * copy is p->ainsn.insn. 311 * copy is p->ainsn.insn.
310 */ 312 */
311static void resume_execution(struct kprobe *p, struct pt_regs *regs) 313static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
312{ 314{
313 int ret; 315 int ret;
314 unsigned int insn = *p->ainsn.insn; 316 unsigned int insn = *p->ainsn.insn;
@@ -373,8 +375,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
373/* 375/*
374 * Wrapper routine for handling exceptions. 376 * Wrapper routine for handling exceptions.
375 */ 377 */
376int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 378int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
377 void *data) 379 unsigned long val, void *data)
378{ 380{
379 struct die_args *args = (struct die_args *)data; 381 struct die_args *args = (struct die_args *)data;
380 int ret = NOTIFY_DONE; 382 int ret = NOTIFY_DONE;
@@ -402,11 +404,11 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
402 default: 404 default:
403 break; 405 break;
404 } 406 }
405 preempt_enable(); 407 preempt_enable_no_resched();
406 return ret; 408 return ret;
407} 409}
408 410
409int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 411int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
410{ 412{
411 struct jprobe *jp = container_of(p, struct jprobe, kp); 413 struct jprobe *jp = container_of(p, struct jprobe, kp);
412 414
@@ -419,16 +421,16 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
419 return 1; 421 return 1;
420} 422}
421 423
422void jprobe_return(void) 424void __kprobes jprobe_return(void)
423{ 425{
424 asm volatile("trap" ::: "memory"); 426 asm volatile("trap" ::: "memory");
425} 427}
426 428
427void jprobe_return_end(void) 429void __kprobes jprobe_return_end(void)
428{ 430{
429}; 431};
430 432
431int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 433int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
432{ 434{
433 /* 435 /*
434 * FIXME - we should ideally be validating that we got here 'cos 436 * FIXME - we should ideally be validating that we got here 'cos
diff --git a/arch/ppc64/kernel/lmb.c b/arch/ppc64/kernel/lmb.c
index d6c6bd03d2a4..5adaca2ddc9d 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/ppc64/kernel/lmb.c
@@ -28,33 +28,28 @@ void lmb_dump_all(void)
28{ 28{
29#ifdef DEBUG 29#ifdef DEBUG
30 unsigned long i; 30 unsigned long i;
31 struct lmb *_lmb = &lmb;
32 31
33 udbg_printf("lmb_dump_all:\n"); 32 udbg_printf("lmb_dump_all:\n");
34 udbg_printf(" memory.cnt = 0x%lx\n", 33 udbg_printf(" memory.cnt = 0x%lx\n",
35 _lmb->memory.cnt); 34 lmb.memory.cnt);
36 udbg_printf(" memory.size = 0x%lx\n", 35 udbg_printf(" memory.size = 0x%lx\n",
37 _lmb->memory.size); 36 lmb.memory.size);
38 for (i=0; i < _lmb->memory.cnt ;i++) { 37 for (i=0; i < lmb.memory.cnt ;i++) {
39 udbg_printf(" memory.region[0x%x].base = 0x%lx\n", 38 udbg_printf(" memory.region[0x%x].base = 0x%lx\n",
40 i, _lmb->memory.region[i].base); 39 i, lmb.memory.region[i].base);
41 udbg_printf(" .physbase = 0x%lx\n",
42 _lmb->memory.region[i].physbase);
43 udbg_printf(" .size = 0x%lx\n", 40 udbg_printf(" .size = 0x%lx\n",
44 _lmb->memory.region[i].size); 41 lmb.memory.region[i].size);
45 } 42 }
46 43
47 udbg_printf("\n reserved.cnt = 0x%lx\n", 44 udbg_printf("\n reserved.cnt = 0x%lx\n",
48 _lmb->reserved.cnt); 45 lmb.reserved.cnt);
49 udbg_printf(" reserved.size = 0x%lx\n", 46 udbg_printf(" reserved.size = 0x%lx\n",
50 _lmb->reserved.size); 47 lmb.reserved.size);
51 for (i=0; i < _lmb->reserved.cnt ;i++) { 48 for (i=0; i < lmb.reserved.cnt ;i++) {
52 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n", 49 udbg_printf(" reserved.region[0x%x].base = 0x%lx\n",
53 i, _lmb->reserved.region[i].base); 50 i, lmb.reserved.region[i].base);
54 udbg_printf(" .physbase = 0x%lx\n",
55 _lmb->reserved.region[i].physbase);
56 udbg_printf(" .size = 0x%lx\n", 51 udbg_printf(" .size = 0x%lx\n",
57 _lmb->reserved.region[i].size); 52 lmb.reserved.region[i].size);
58 } 53 }
59#endif /* DEBUG */ 54#endif /* DEBUG */
60} 55}
@@ -98,7 +93,6 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
98 rgn->region[r1].size += rgn->region[r2].size; 93 rgn->region[r1].size += rgn->region[r2].size;
99 for (i=r2; i < rgn->cnt-1; i++) { 94 for (i=r2; i < rgn->cnt-1; i++) {
100 rgn->region[i].base = rgn->region[i+1].base; 95 rgn->region[i].base = rgn->region[i+1].base;
101 rgn->region[i].physbase = rgn->region[i+1].physbase;
102 rgn->region[i].size = rgn->region[i+1].size; 96 rgn->region[i].size = rgn->region[i+1].size;
103 } 97 }
104 rgn->cnt--; 98 rgn->cnt--;
@@ -108,49 +102,29 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
108void __init 102void __init
109lmb_init(void) 103lmb_init(void)
110{ 104{
111 struct lmb *_lmb = &lmb;
112
113 /* Create a dummy zero size LMB which will get coalesced away later. 105 /* Create a dummy zero size LMB which will get coalesced away later.
114 * This simplifies the lmb_add() code below... 106 * This simplifies the lmb_add() code below...
115 */ 107 */
116 _lmb->memory.region[0].base = 0; 108 lmb.memory.region[0].base = 0;
117 _lmb->memory.region[0].size = 0; 109 lmb.memory.region[0].size = 0;
118 _lmb->memory.cnt = 1; 110 lmb.memory.cnt = 1;
119 111
120 /* Ditto. */ 112 /* Ditto. */
121 _lmb->reserved.region[0].base = 0; 113 lmb.reserved.region[0].base = 0;
122 _lmb->reserved.region[0].size = 0; 114 lmb.reserved.region[0].size = 0;
123 _lmb->reserved.cnt = 1; 115 lmb.reserved.cnt = 1;
124} 116}
125 117
126/* This routine called with relocation disabled. */ 118/* This routine called with relocation disabled. */
127void __init 119void __init
128lmb_analyze(void) 120lmb_analyze(void)
129{ 121{
130 unsigned long i; 122 int i;
131 unsigned long mem_size = 0; 123
132 unsigned long size_mask = 0; 124 lmb.memory.size = 0;
133 struct lmb *_lmb = &lmb;
134#ifdef CONFIG_MSCHUNKS
135 unsigned long physbase = 0;
136#endif
137
138 for (i=0; i < _lmb->memory.cnt; i++) {
139 unsigned long lmb_size;
140
141 lmb_size = _lmb->memory.region[i].size;
142
143#ifdef CONFIG_MSCHUNKS
144 _lmb->memory.region[i].physbase = physbase;
145 physbase += lmb_size;
146#else
147 _lmb->memory.region[i].physbase = _lmb->memory.region[i].base;
148#endif
149 mem_size += lmb_size;
150 size_mask |= lmb_size;
151 }
152 125
153 _lmb->memory.size = mem_size; 126 for (i = 0; i < lmb.memory.cnt; i++)
127 lmb.memory.size += lmb.memory.region[i].size;
154} 128}
155 129
156/* This routine called with relocation disabled. */ 130/* This routine called with relocation disabled. */
@@ -168,7 +142,6 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
168 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); 142 adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
169 if ( adjacent > 0 ) { 143 if ( adjacent > 0 ) {
170 rgn->region[i].base -= size; 144 rgn->region[i].base -= size;
171 rgn->region[i].physbase -= size;
172 rgn->region[i].size += size; 145 rgn->region[i].size += size;
173 coalesced++; 146 coalesced++;
174 break; 147 break;
@@ -195,11 +168,9 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
195 for (i=rgn->cnt-1; i >= 0; i--) { 168 for (i=rgn->cnt-1; i >= 0; i--) {
196 if (base < rgn->region[i].base) { 169 if (base < rgn->region[i].base) {
197 rgn->region[i+1].base = rgn->region[i].base; 170 rgn->region[i+1].base = rgn->region[i].base;
198 rgn->region[i+1].physbase = rgn->region[i].physbase;
199 rgn->region[i+1].size = rgn->region[i].size; 171 rgn->region[i+1].size = rgn->region[i].size;
200 } else { 172 } else {
201 rgn->region[i+1].base = base; 173 rgn->region[i+1].base = base;
202 rgn->region[i+1].physbase = lmb_abs_to_phys(base);
203 rgn->region[i+1].size = size; 174 rgn->region[i+1].size = size;
204 break; 175 break;
205 } 176 }
@@ -213,12 +184,11 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
213long __init 184long __init
214lmb_add(unsigned long base, unsigned long size) 185lmb_add(unsigned long base, unsigned long size)
215{ 186{
216 struct lmb *_lmb = &lmb; 187 struct lmb_region *_rgn = &(lmb.memory);
217 struct lmb_region *_rgn = &(_lmb->memory);
218 188
219 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 189 /* On pSeries LPAR systems, the first LMB is our RMO region. */
220 if ( base == 0 ) 190 if ( base == 0 )
221 _lmb->rmo_size = size; 191 lmb.rmo_size = size;
222 192
223 return lmb_add_region(_rgn, base, size); 193 return lmb_add_region(_rgn, base, size);
224 194
@@ -227,8 +197,7 @@ lmb_add(unsigned long base, unsigned long size)
227long __init 197long __init
228lmb_reserve(unsigned long base, unsigned long size) 198lmb_reserve(unsigned long base, unsigned long size)
229{ 199{
230 struct lmb *_lmb = &lmb; 200 struct lmb_region *_rgn = &(lmb.reserved);
231 struct lmb_region *_rgn = &(_lmb->reserved);
232 201
233 return lmb_add_region(_rgn, base, size); 202 return lmb_add_region(_rgn, base, size);
234} 203}
@@ -260,13 +229,10 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
260{ 229{
261 long i, j; 230 long i, j;
262 unsigned long base = 0; 231 unsigned long base = 0;
263 struct lmb *_lmb = &lmb;
264 struct lmb_region *_mem = &(_lmb->memory);
265 struct lmb_region *_rsv = &(_lmb->reserved);
266 232
267 for (i=_mem->cnt-1; i >= 0; i--) { 233 for (i=lmb.memory.cnt-1; i >= 0; i--) {
268 unsigned long lmbbase = _mem->region[i].base; 234 unsigned long lmbbase = lmb.memory.region[i].base;
269 unsigned long lmbsize = _mem->region[i].size; 235 unsigned long lmbsize = lmb.memory.region[i].size;
270 236
271 if ( max_addr == LMB_ALLOC_ANYWHERE ) 237 if ( max_addr == LMB_ALLOC_ANYWHERE )
272 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); 238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align);
@@ -276,8 +242,8 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
276 continue; 242 continue;
277 243
278 while ( (lmbbase <= base) && 244 while ( (lmbbase <= base) &&
279 ((j = lmb_overlaps_region(_rsv,base,size)) >= 0) ) { 245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) {
280 base = _ALIGN_DOWN(_rsv->region[j].base-size, align); 246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align);
281 } 247 }
282 248
283 if ( (base != 0) && (lmbbase <= base) ) 249 if ( (base != 0) && (lmbbase <= base) )
@@ -287,62 +253,24 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
287 if ( i < 0 ) 253 if ( i < 0 )
288 return 0; 254 return 0;
289 255
290 lmb_add_region(_rsv, base, size); 256 lmb_add_region(&lmb.reserved, base, size);
291 257
292 return base; 258 return base;
293} 259}
294 260
261/* You must call lmb_analyze() before this. */
295unsigned long __init 262unsigned long __init
296lmb_phys_mem_size(void) 263lmb_phys_mem_size(void)
297{ 264{
298 struct lmb *_lmb = &lmb; 265 return lmb.memory.size;
299#ifdef CONFIG_MSCHUNKS
300 return _lmb->memory.size;
301#else
302 struct lmb_region *_mem = &(_lmb->memory);
303 unsigned long total = 0;
304 int i;
305
306 /* add all physical memory to the bootmem map */
307 for (i=0; i < _mem->cnt; i++)
308 total += _mem->region[i].size;
309 return total;
310#endif /* CONFIG_MSCHUNKS */
311} 266}
312 267
313unsigned long __init 268unsigned long __init
314lmb_end_of_DRAM(void) 269lmb_end_of_DRAM(void)
315{ 270{
316 struct lmb *_lmb = &lmb; 271 int idx = lmb.memory.cnt - 1;
317 struct lmb_region *_mem = &(_lmb->memory);
318 int idx = _mem->cnt - 1;
319
320#ifdef CONFIG_MSCHUNKS
321 return (_mem->region[idx].physbase + _mem->region[idx].size);
322#else
323 return (_mem->region[idx].base + _mem->region[idx].size);
324#endif /* CONFIG_MSCHUNKS */
325
326 return 0;
327}
328
329unsigned long __init
330lmb_abs_to_phys(unsigned long aa)
331{
332 unsigned long i, pa = aa;
333 struct lmb *_lmb = &lmb;
334 struct lmb_region *_mem = &(_lmb->memory);
335
336 for (i=0; i < _mem->cnt; i++) {
337 unsigned long lmbbase = _mem->region[i].base;
338 unsigned long lmbsize = _mem->region[i].size;
339 if ( lmb_addrs_overlap(aa,1,lmbbase,lmbsize) ) {
340 pa = _mem->region[i].physbase + (aa - lmbbase);
341 break;
342 }
343 }
344 272
345 return pa; 273 return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
346} 274}
347 275
348/* 276/*
@@ -353,20 +281,19 @@ void __init lmb_enforce_memory_limit(void)
353{ 281{
354 extern unsigned long memory_limit; 282 extern unsigned long memory_limit;
355 unsigned long i, limit; 283 unsigned long i, limit;
356 struct lmb_region *mem = &(lmb.memory);
357 284
358 if (! memory_limit) 285 if (! memory_limit)
359 return; 286 return;
360 287
361 limit = memory_limit; 288 limit = memory_limit;
362 for (i = 0; i < mem->cnt; i++) { 289 for (i = 0; i < lmb.memory.cnt; i++) {
363 if (limit > mem->region[i].size) { 290 if (limit > lmb.memory.region[i].size) {
364 limit -= mem->region[i].size; 291 limit -= lmb.memory.region[i].size;
365 continue; 292 continue;
366 } 293 }
367 294
368 mem->region[i].size = limit; 295 lmb.memory.region[i].size = limit;
369 mem->cnt = i + 1; 296 lmb.memory.cnt = i + 1;
370 break; 297 break;
371 } 298 }
372} 299}
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c
index 02e96627fa66..cae19bbd5acd 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/ppc64/kernel/lparcfg.c
@@ -29,7 +29,7 @@
29#include <asm/iSeries/HvLpConfig.h> 29#include <asm/iSeries/HvLpConfig.h>
30#include <asm/lppaca.h> 30#include <asm/lppaca.h>
31#include <asm/hvcall.h> 31#include <asm/hvcall.h>
32#include <asm/cputable.h> 32#include <asm/firmware.h>
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/time.h> 35#include <asm/time.h>
@@ -273,6 +273,7 @@ static void parse_system_parameter_string(struct seq_file *m)
273 if (!workbuffer) { 273 if (!workbuffer) {
274 printk(KERN_ERR "%s %s kmalloc failure at line %d \n", 274 printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
275 __FILE__, __FUNCTION__, __LINE__); 275 __FILE__, __FUNCTION__, __LINE__);
276 kfree(local_buffer);
276 return; 277 return;
277 } 278 }
278#ifdef LPARCFG_DEBUG 279#ifdef LPARCFG_DEBUG
@@ -377,7 +378,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
377 378
378 partition_active_processors = lparcfg_count_active_processors(); 379 partition_active_processors = lparcfg_count_active_processors();
379 380
380 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 381 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
381 unsigned long h_entitled, h_unallocated; 382 unsigned long h_entitled, h_unallocated;
382 unsigned long h_aggregation, h_resource; 383 unsigned long h_aggregation, h_resource;
383 unsigned long pool_idle_time, pool_procs; 384 unsigned long pool_idle_time, pool_procs;
@@ -568,10 +569,10 @@ struct file_operations lparcfg_fops = {
568int __init lparcfg_init(void) 569int __init lparcfg_init(void)
569{ 570{
570 struct proc_dir_entry *ent; 571 struct proc_dir_entry *ent;
571 mode_t mode = S_IRUSR; 572 mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
572 573
573 /* Allow writing if we have FW_FEATURE_SPLPAR */ 574 /* Allow writing if we have FW_FEATURE_SPLPAR */
574 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 575 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
575 lparcfg_fops.write = lparcfg_write; 576 lparcfg_fops.write = lparcfg_write;
576 mode |= S_IWUSR; 577 mode |= S_IWUSR;
577 } 578 }
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/ppc64/kernel/maple_pci.c
index 53993999b265..1d297e0edfc0 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/ppc64/kernel/maple_pci.c
@@ -283,7 +283,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
283 * the reg address cell, we shall fix that by killing struct 283 * the reg address cell, we shall fix that by killing struct
284 * reg_property and using some accessor functions instead 284 * reg_property and using some accessor functions instead
285 */ 285 */
286 hose->first_busno = 0xf0; 286 hose->first_busno = 0xf0;
287 hose->last_busno = 0xff; 287 hose->last_busno = 0xff;
288 hose->ops = &u3_agp_pci_ops; 288 hose->ops = &u3_agp_pci_ops;
289 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); 289 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
@@ -315,24 +315,24 @@ static int __init add_bridge(struct device_node *dev)
315 char* disp_name; 315 char* disp_name;
316 int *bus_range; 316 int *bus_range;
317 int primary = 1; 317 int primary = 1;
318 struct property *of_prop; 318 struct property *of_prop;
319 319
320 DBG("Adding PCI host bridge %s\n", dev->full_name); 320 DBG("Adding PCI host bridge %s\n", dev->full_name);
321 321
322 bus_range = (int *) get_property(dev, "bus-range", &len); 322 bus_range = (int *) get_property(dev, "bus-range", &len);
323 if (bus_range == NULL || len < 2 * sizeof(int)) { 323 if (bus_range == NULL || len < 2 * sizeof(int)) {
324 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n", 324 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
325 dev->full_name); 325 dev->full_name);
326 } 326 }
327 327
328 hose = alloc_bootmem(sizeof(struct pci_controller)); 328 hose = alloc_bootmem(sizeof(struct pci_controller));
329 if (hose == NULL) 329 if (hose == NULL)
330 return -ENOMEM; 330 return -ENOMEM;
331 pci_setup_pci_controller(hose); 331 pci_setup_pci_controller(hose);
332 332
333 hose->arch_data = dev; 333 hose->arch_data = dev;
334 hose->first_busno = bus_range ? bus_range[0] : 0; 334 hose->first_busno = bus_range ? bus_range[0] : 0;
335 hose->last_busno = bus_range ? bus_range[1] : 0xff; 335 hose->last_busno = bus_range ? bus_range[1] : 0xff;
336 336
337 of_prop = alloc_bootmem(sizeof(struct property) + 337 of_prop = alloc_bootmem(sizeof(struct property) +
338 sizeof(hose->global_number)); 338 sizeof(hose->global_number));
@@ -346,25 +346,25 @@ static int __init add_bridge(struct device_node *dev)
346 } 346 }
347 347
348 disp_name = NULL; 348 disp_name = NULL;
349 if (device_is_compatible(dev, "u3-agp")) { 349 if (device_is_compatible(dev, "u3-agp")) {
350 setup_u3_agp(hose); 350 setup_u3_agp(hose);
351 disp_name = "U3-AGP"; 351 disp_name = "U3-AGP";
352 primary = 0; 352 primary = 0;
353 } else if (device_is_compatible(dev, "u3-ht")) { 353 } else if (device_is_compatible(dev, "u3-ht")) {
354 setup_u3_ht(hose); 354 setup_u3_ht(hose);
355 disp_name = "U3-HT"; 355 disp_name = "U3-HT";
356 primary = 1; 356 primary = 1;
357 } 357 }
358 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n", 358 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
359 disp_name, hose->first_busno, hose->last_busno); 359 disp_name, hose->first_busno, hose->last_busno);
360 360
361 /* Interpret the "ranges" property */ 361 /* Interpret the "ranges" property */
362 /* This also maps the I/O region and sets isa_io/mem_base */ 362 /* This also maps the I/O region and sets isa_io/mem_base */
363 pci_process_bridge_OF_ranges(hose, dev); 363 pci_process_bridge_OF_ranges(hose, dev);
364 pci_setup_phb_io(hose, primary); 364 pci_setup_phb_io(hose, primary);
365 365
366 /* Fixup "bus-range" OF property */ 366 /* Fixup "bus-range" OF property */
367 fixup_bus_range(dev); 367 fixup_bus_range(dev);
368 368
369 return 0; 369 return 0;
370} 370}
@@ -447,9 +447,9 @@ void __init maple_pci_init(void)
447 */ 447 */
448 if (u3_agp) { 448 if (u3_agp) {
449 struct device_node *np = u3_agp->arch_data; 449 struct device_node *np = u3_agp->arch_data;
450 np->busno = 0xf0; 450 PCI_DN(np)->busno = 0xf0;
451 for (np = np->child; np; np = np->sibling) 451 for (np = np->child; np; np = np->sibling)
452 np->busno = 0xf0; 452 PCI_DN(np)->busno = 0xf0;
453 } 453 }
454 454
455 /* Tell pci.c to use the common resource allocation mechanism */ 455 /* Tell pci.c to use the common resource allocation mechanism */
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c
index bb55b5a56910..fc0567498a3a 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/ppc64/kernel/maple_setup.c
@@ -207,9 +207,6 @@ static void __init maple_init_early(void)
207 comport = (void *)ioremap(physport, 16); 207 comport = (void *)ioremap(physport, 16);
208 udbg_init_uart(comport, default_speed); 208 udbg_init_uart(comport, default_speed);
209 209
210 ppc_md.udbg_putc = udbg_putc;
211 ppc_md.udbg_getc = udbg_getc;
212 ppc_md.udbg_getc_poll = udbg_getc_poll;
213 DBG("Hello World !\n"); 210 DBG("Hello World !\n");
214 } 211 }
215 212
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index a05b50b738e9..e7241ad80a08 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -26,7 +26,7 @@
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/cache.h> 27#include <asm/cache.h>
28#include <asm/ppc_asm.h> 28#include <asm/ppc_asm.h>
29#include <asm/offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
31 31
32 .text 32 .text
@@ -183,7 +183,7 @@ PPC64_CACHES:
183 * flush all bytes from start through stop-1 inclusive 183 * flush all bytes from start through stop-1 inclusive
184 */ 184 */
185 185
186_GLOBAL(__flush_icache_range) 186_KPROBE(__flush_icache_range)
187 187
188/* 188/*
189 * Flush the data cache to memory 189 * Flush the data cache to memory
@@ -223,7 +223,7 @@ _GLOBAL(__flush_icache_range)
223 bdnz 2b 223 bdnz 2b
224 isync 224 isync
225 blr 225 blr
226 226 .previous .text
227/* 227/*
228 * Like above, but only do the D-cache. 228 * Like above, but only do the D-cache.
229 * 229 *
@@ -680,6 +680,104 @@ _GLOBAL(kernel_thread)
680 ld r30,-16(r1) 680 ld r30,-16(r1)
681 blr 681 blr
682 682
683/*
684 * disable_kernel_fp()
685 * Disable the FPU.
686 */
687_GLOBAL(disable_kernel_fp)
688 mfmsr r3
689 rldicl r0,r3,(63-MSR_FP_LG),1
690 rldicl r3,r0,(MSR_FP_LG+1),0
691 mtmsrd r3 /* disable use of fpu now */
692 isync
693 blr
694
695/*
696 * giveup_fpu(tsk)
697 * Disable FP for the task given as the argument,
698 * and save the floating-point registers in its thread_struct.
699 * Enables the FPU for use in the kernel on return.
700 */
701_GLOBAL(giveup_fpu)
702 mfmsr r5
703 ori r5,r5,MSR_FP
704 mtmsrd r5 /* enable use of fpu now */
705 isync
706 cmpdi 0,r3,0
707 beqlr- /* if no previous owner, done */
708 addi r3,r3,THREAD /* want THREAD of task */
709 ld r5,PT_REGS(r3)
710 cmpdi 0,r5,0
711 SAVE_32FPRS(0, r3)
712 mffs fr0
713 stfd fr0,THREAD_FPSCR(r3)
714 beq 1f
715 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
716 li r3,MSR_FP|MSR_FE0|MSR_FE1
717 andc r4,r4,r3 /* disable FP for previous task */
718 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7191:
720#ifndef CONFIG_SMP
721 li r5,0
722 ld r4,last_task_used_math@got(r2)
723 std r5,0(r4)
724#endif /* CONFIG_SMP */
725 blr
726
727#ifdef CONFIG_ALTIVEC
728
729#if 0 /* this has no callers for now */
730/*
731 * disable_kernel_altivec()
732 * Disable the VMX.
733 */
734_GLOBAL(disable_kernel_altivec)
735 mfmsr r3
736 rldicl r0,r3,(63-MSR_VEC_LG),1
737 rldicl r3,r0,(MSR_VEC_LG+1),0
738 mtmsrd r3 /* disable use of VMX now */
739 isync
740 blr
741#endif /* 0 */
742
743/*
744 * giveup_altivec(tsk)
745 * Disable VMX for the task given as the argument,
746 * and save the vector registers in its thread_struct.
747 * Enables the VMX for use in the kernel on return.
748 */
749_GLOBAL(giveup_altivec)
750 mfmsr r5
751 oris r5,r5,MSR_VEC@h
752 mtmsrd r5 /* enable use of VMX now */
753 isync
754 cmpdi 0,r3,0
755 beqlr- /* if no previous owner, done */
756 addi r3,r3,THREAD /* want THREAD of task */
757 ld r5,PT_REGS(r3)
758 cmpdi 0,r5,0
759 SAVE_32VRS(0,r4,r3)
760 mfvscr vr0
761 li r4,THREAD_VSCR
762 stvx vr0,r4,r3
763 beq 1f
764 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
765 lis r3,MSR_VEC@h
766 andc r4,r4,r3 /* disable VMX for previous task */
767 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7681:
769#ifndef CONFIG_SMP
770 li r5,0
771 ld r4,last_task_used_altivec@got(r2)
772 std r5,0(r4)
773#endif /* CONFIG_SMP */
774 blr
775
776#endif /* CONFIG_ALTIVEC */
777
778_GLOBAL(__setup_cpu_power3)
779 blr
780
683/* kexec_wait(phys_cpu) 781/* kexec_wait(phys_cpu)
684 * 782 *
685 * wait for the flag to change, indicating this kernel is going away but 783 * wait for the flag to change, indicating this kernel is going away but
@@ -859,7 +957,7 @@ _GLOBAL(sys_call_table32)
859 .llong .ppc_fork 957 .llong .ppc_fork
860 .llong .sys_read 958 .llong .sys_read
861 .llong .sys_write 959 .llong .sys_write
862 .llong .sys32_open /* 5 */ 960 .llong .compat_sys_open /* 5 */
863 .llong .sys_close 961 .llong .sys_close
864 .llong .sys32_waitpid 962 .llong .sys32_waitpid
865 .llong .sys32_creat 963 .llong .sys32_creat
@@ -1333,9 +1431,9 @@ _GLOBAL(sys_call_table)
1333 .llong .sys_ni_syscall /* 195 - 32bit only stat64 */ 1431 .llong .sys_ni_syscall /* 195 - 32bit only stat64 */
1334 .llong .sys_ni_syscall /* 32bit only lstat64 */ 1432 .llong .sys_ni_syscall /* 32bit only lstat64 */
1335 .llong .sys_ni_syscall /* 32bit only fstat64 */ 1433 .llong .sys_ni_syscall /* 32bit only fstat64 */
1336 .llong .sys_ni_syscall /* 32bit only pciconfig_read */ 1434 .llong .sys_pciconfig_read
1337 .llong .sys_ni_syscall /* 32bit only pciconfig_write */ 1435 .llong .sys_pciconfig_write
1338 .llong .sys_ni_syscall /* 32bit only pciconfig_iobase */ 1436 .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */
1339 .llong .sys_ni_syscall /* reserved for MacOnLinux */ 1437 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1340 .llong .sys_getdents64 1438 .llong .sys_getdents64
1341 .llong .sys_pivot_root 1439 .llong .sys_pivot_root
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c
index b80e81984ba8..da580812ddfe 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/ppc64/kernel/of_device.c
@@ -236,7 +236,6 @@ void of_device_unregister(struct of_device *ofdev)
236struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id) 236struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id)
237{ 237{
238 struct of_device *dev; 238 struct of_device *dev;
239 u32 *reg;
240 239
241 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 240 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
242 if (!dev) 241 if (!dev)
@@ -250,7 +249,6 @@ struct of_device* of_platform_device_create(struct device_node *np, const char *
250 dev->dev.bus = &of_platform_bus_type; 249 dev->dev.bus = &of_platform_bus_type;
251 dev->dev.release = of_release_dev; 250 dev->dev.release = of_release_dev;
252 251
253 reg = (u32 *)get_property(np, "reg", NULL);
254 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE); 252 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
255 253
256 if (of_device_register(dev) != 0) { 254 if (of_device_register(dev) != 0) {
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index 69130522a87e..f0fd7fbd6531 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -45,6 +45,7 @@
45#include <asm/plpar_wrappers.h> 45#include <asm/plpar_wrappers.h>
46#include <asm/pSeries_reconfig.h> 46#include <asm/pSeries_reconfig.h>
47#include <asm/systemcfg.h> 47#include <asm/systemcfg.h>
48#include <asm/firmware.h>
48#include "pci.h" 49#include "pci.h"
49 50
50#define DBG(fmt...) 51#define DBG(fmt...)
@@ -294,7 +295,7 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
294 struct iommu_table *tbl, 295 struct iommu_table *tbl,
295 unsigned int *dma_window) 296 unsigned int *dma_window)
296{ 297{
297 tbl->it_busno = dn->bussubno; 298 tbl->it_busno = PCI_DN(dn)->bussubno;
298 299
299 /* TODO: Parse field size properties properly. */ 300 /* TODO: Parse field size properties properly. */
300 tbl->it_size = (((unsigned long)dma_window[4] << 32) | 301 tbl->it_size = (((unsigned long)dma_window[4] << 32) |
@@ -310,6 +311,7 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
310static void iommu_bus_setup_pSeries(struct pci_bus *bus) 311static void iommu_bus_setup_pSeries(struct pci_bus *bus)
311{ 312{
312 struct device_node *dn, *pdn; 313 struct device_node *dn, *pdn;
314 struct pci_dn *pci;
313 struct iommu_table *tbl; 315 struct iommu_table *tbl;
314 316
315 DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self); 317 DBG("iommu_bus_setup_pSeries, bus %p, bus->self %p\n", bus, bus->self);
@@ -324,6 +326,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
324 */ 326 */
325 327
326 dn = pci_bus_to_OF_node(bus); 328 dn = pci_bus_to_OF_node(bus);
329 pci = dn->data;
327 330
328 if (!bus->self) { 331 if (!bus->self) {
329 /* Root bus */ 332 /* Root bus */
@@ -340,18 +343,18 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
340 * alltogether. This leaves 768MB for the window. 343 * alltogether. This leaves 768MB for the window.
341 */ 344 */
342 DBG("PHB has io-hole, reserving 256MB\n"); 345 DBG("PHB has io-hole, reserving 256MB\n");
343 dn->phb->dma_window_size = 3 << 28; 346 pci->phb->dma_window_size = 3 << 28;
344 dn->phb->dma_window_base_cur = 1 << 28; 347 pci->phb->dma_window_base_cur = 1 << 28;
345 } else { 348 } else {
346 /* 1GB window by default */ 349 /* 1GB window by default */
347 dn->phb->dma_window_size = 1 << 30; 350 pci->phb->dma_window_size = 1 << 30;
348 dn->phb->dma_window_base_cur = 0; 351 pci->phb->dma_window_base_cur = 0;
349 } 352 }
350 353
351 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 354 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
352 355
353 iommu_table_setparms(dn->phb, dn, tbl); 356 iommu_table_setparms(pci->phb, dn, tbl);
354 dn->iommu_table = iommu_init_table(tbl); 357 pci->iommu_table = iommu_init_table(tbl);
355 } else { 358 } else {
356 /* Do a 128MB table at root. This is used for the IDE 359 /* Do a 128MB table at root. This is used for the IDE
357 * controller on some SMP-mode POWER4 machines. It 360 * controller on some SMP-mode POWER4 machines. It
@@ -362,16 +365,16 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
362 * Allocate at offset 128MB to avoid having to deal 365 * Allocate at offset 128MB to avoid having to deal
363 * with ISA holes; 128MB table for IDE is plenty. 366 * with ISA holes; 128MB table for IDE is plenty.
364 */ 367 */
365 dn->phb->dma_window_size = 1 << 27; 368 pci->phb->dma_window_size = 1 << 27;
366 dn->phb->dma_window_base_cur = 1 << 27; 369 pci->phb->dma_window_base_cur = 1 << 27;
367 370
368 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 371 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
369 372
370 iommu_table_setparms(dn->phb, dn, tbl); 373 iommu_table_setparms(pci->phb, dn, tbl);
371 dn->iommu_table = iommu_init_table(tbl); 374 pci->iommu_table = iommu_init_table(tbl);
372 375
373 /* All child buses have 256MB tables */ 376 /* All child buses have 256MB tables */
374 dn->phb->dma_window_size = 1 << 28; 377 pci->phb->dma_window_size = 1 << 28;
375 } 378 }
376 } else { 379 } else {
377 pdn = pci_bus_to_OF_node(bus->parent); 380 pdn = pci_bus_to_OF_node(bus->parent);
@@ -385,12 +388,12 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus)
385 388
386 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 389 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
387 390
388 iommu_table_setparms(dn->phb, dn, tbl); 391 iommu_table_setparms(pci->phb, dn, tbl);
389 392
390 dn->iommu_table = iommu_init_table(tbl); 393 pci->iommu_table = iommu_init_table(tbl);
391 } else { 394 } else {
392 /* Lower than first child or under python, use parent table */ 395 /* Lower than first child or under python, use parent table */
393 dn->iommu_table = pdn->iommu_table; 396 pci->iommu_table = PCI_DN(pdn)->iommu_table;
394 } 397 }
395 } 398 }
396} 399}
@@ -400,6 +403,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
400{ 403{
401 struct iommu_table *tbl; 404 struct iommu_table *tbl;
402 struct device_node *dn, *pdn; 405 struct device_node *dn, *pdn;
406 struct pci_dn *ppci;
403 unsigned int *dma_window = NULL; 407 unsigned int *dma_window = NULL;
404 408
405 DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self); 409 DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self);
@@ -418,22 +422,24 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus)
418 return; 422 return;
419 } 423 }
420 424
421 if (!pdn->iommu_table) { 425 ppci = pdn->data;
426 if (!ppci->iommu_table) {
422 /* Bussubno hasn't been copied yet. 427 /* Bussubno hasn't been copied yet.
423 * Do it now because iommu_table_setparms_lpar needs it. 428 * Do it now because iommu_table_setparms_lpar needs it.
424 */ 429 */
425 pdn->bussubno = bus->number; 430
431 ppci->bussubno = bus->number;
426 432
427 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table), 433 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
428 GFP_KERNEL); 434 GFP_KERNEL);
429 435
430 iommu_table_setparms_lpar(pdn->phb, pdn, tbl, dma_window); 436 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
431 437
432 pdn->iommu_table = iommu_init_table(tbl); 438 ppci->iommu_table = iommu_init_table(tbl);
433 } 439 }
434 440
435 if (pdn != dn) 441 if (pdn != dn)
436 dn->iommu_table = pdn->iommu_table; 442 PCI_DN(dn)->iommu_table = ppci->iommu_table;
437} 443}
438 444
439 445
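The hunks above and below migrate per-device PCI bookkeeping (phb, busno, devfn, iommu_table) out of struct device_node and into a separate structure reached through dn->data and a PCI_DN() cast, which is why the callers now check dn->data before dereferencing. A minimal userspace sketch of that accessor pattern follows; the struct layout, field names and the PCI_DN_SKETCH() macro are illustrative stand-ins, not the kernel's actual definitions.

/* Sketch only: per-device PCI data hung off dn->data, reached via a cast. */
#include <stdio.h>
#include <stdlib.h>

struct pci_dn_sketch {
        unsigned int busno;
        unsigned int devfn;
        void *iommu_table;              /* stands in for struct iommu_table * */
};

struct device_node_sketch {
        const char *name;
        void *data;                     /* points at the pci_dn_sketch, if any */
};

#define PCI_DN_SKETCH(dn) ((struct pci_dn_sketch *)((dn)->data))

int main(void)
{
        struct device_node_sketch dn = { .name = "pci@800000020000002" };
        struct pci_dn_sketch *pdn = calloc(1, sizeof(*pdn));

        pdn->busno = 0x40;
        pdn->devfn = 0x08;
        dn.data = pdn;

        /* callers test dn->data first, as the reworked setup code above does */
        if (dn.data)
                printf("%s: bus %#x devfn %#x\n", dn.name,
                       PCI_DN_SKETCH(&dn)->busno, PCI_DN_SKETCH(&dn)->devfn);

        free(pdn);
        return 0;
}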
@@ -448,11 +454,11 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev)
448 */ 454 */
449 mydn = dn = pci_device_to_OF_node(dev); 455 mydn = dn = pci_device_to_OF_node(dev);
450 456
451 while (dn && dn->iommu_table == NULL) 457 while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL)
452 dn = dn->parent; 458 dn = dn->parent;
453 459
454 if (dn) { 460 if (dn && dn->data) {
455 mydn->iommu_table = dn->iommu_table; 461 PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table;
456 } else { 462 } else {
457 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, dev->pretty_name); 463 DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, dev->pretty_name);
458 } 464 }
@@ -462,10 +468,11 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
462{ 468{
463 int err = NOTIFY_OK; 469 int err = NOTIFY_OK;
464 struct device_node *np = node; 470 struct device_node *np = node;
471 struct pci_dn *pci = np->data;
465 472
466 switch (action) { 473 switch (action) {
467 case PSERIES_RECONFIG_REMOVE: 474 case PSERIES_RECONFIG_REMOVE:
468 if (np->iommu_table && 475 if (pci->iommu_table &&
469 get_property(np, "ibm,dma-window", NULL)) 476 get_property(np, "ibm,dma-window", NULL))
470 iommu_free_table(np); 477 iommu_free_table(np);
471 break; 478 break;
@@ -485,6 +492,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
485 struct device_node *pdn, *dn; 492 struct device_node *pdn, *dn;
486 struct iommu_table *tbl; 493 struct iommu_table *tbl;
487 int *dma_window = NULL; 494 int *dma_window = NULL;
495 struct pci_dn *pci;
488 496
489 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name); 497 DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name);
490 498
@@ -496,8 +504,10 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
496 */ 504 */
497 dn = pci_device_to_OF_node(dev); 505 dn = pci_device_to_OF_node(dev);
498 506
499 for (pdn = dn; pdn && !pdn->iommu_table; pdn = pdn->parent) { 507 for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table;
500 dma_window = (unsigned int *)get_property(pdn, "ibm,dma-window", NULL); 508 pdn = pdn->parent) {
509 dma_window = (unsigned int *)
510 get_property(pdn, "ibm,dma-window", NULL);
501 if (dma_window) 511 if (dma_window)
502 break; 512 break;
503 } 513 }
@@ -514,20 +524,21 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
514 DBG("Found DMA window, allocating table\n"); 524 DBG("Found DMA window, allocating table\n");
515 } 525 }
516 526
517 if (!pdn->iommu_table) { 527 pci = pdn->data;
528 if (!pci->iommu_table) {
518 /* iommu_table_setparms_lpar needs bussubno. */ 529 /* iommu_table_setparms_lpar needs bussubno. */
519 pdn->bussubno = pdn->phb->bus->number; 530 pci->bussubno = pci->phb->bus->number;
520 531
521 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table), 532 tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
522 GFP_KERNEL); 533 GFP_KERNEL);
523 534
524 iommu_table_setparms_lpar(pdn->phb, pdn, tbl, dma_window); 535 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
525 536
526 pdn->iommu_table = iommu_init_table(tbl); 537 pci->iommu_table = iommu_init_table(tbl);
527 } 538 }
528 539
529 if (pdn != dn) 540 if (pdn != dn)
530 dn->iommu_table = pdn->iommu_table; 541 PCI_DN(dn)->iommu_table = pci->iommu_table;
531} 542}
532 543
533static void iommu_bus_setup_null(struct pci_bus *b) { } 544static void iommu_bus_setup_null(struct pci_bus *b) { }
@@ -546,7 +557,7 @@ void iommu_init_early_pSeries(void)
546 } 557 }
547 558
548 if (systemcfg->platform & PLATFORM_LPAR) { 559 if (systemcfg->platform & PLATFORM_LPAR) {
549 if (cur_cpu_spec->firmware_features & FW_FEATURE_MULTITCE) { 560 if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
550 ppc_md.tce_build = tce_buildmulti_pSeriesLP; 561 ppc_md.tce_build = tce_buildmulti_pSeriesLP;
551 ppc_md.tce_free = tce_freemulti_pSeriesLP; 562 ppc_md.tce_free = tce_freemulti_pSeriesLP;
552 } else { 563 } else {
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/ppc64/kernel/pSeries_lpar.c
index 74dd144dcce8..a6de83f2078f 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/ppc64/kernel/pSeries_lpar.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(plpar_hcall_4out);
52EXPORT_SYMBOL(plpar_hcall_norets); 52EXPORT_SYMBOL(plpar_hcall_norets);
53EXPORT_SYMBOL(plpar_hcall_8arg_2ret); 53EXPORT_SYMBOL(plpar_hcall_8arg_2ret);
54 54
55extern void fw_feature_init(void);
56extern void pSeries_find_serial_port(void); 55extern void pSeries_find_serial_port(void);
57 56
58 57
@@ -193,9 +192,9 @@ static unsigned char udbg_getcLP(void)
193void udbg_init_debug_lpar(void) 192void udbg_init_debug_lpar(void)
194{ 193{
195 vtermno = 0; 194 vtermno = 0;
196 ppc_md.udbg_putc = udbg_putcLP; 195 udbg_putc = udbg_putcLP;
197 ppc_md.udbg_getc = udbg_getcLP; 196 udbg_getc = udbg_getcLP;
198 ppc_md.udbg_getc_poll = udbg_getc_pollLP; 197 udbg_getc_poll = udbg_getc_pollLP;
199} 198}
200 199
201/* returns 0 if couldn't find or use /chosen/stdout as console */ 200/* returns 0 if couldn't find or use /chosen/stdout as console */
@@ -228,18 +227,18 @@ int find_udbg_vterm(void)
228 termno = (u32 *)get_property(stdout_node, "reg", NULL); 227 termno = (u32 *)get_property(stdout_node, "reg", NULL);
229 if (termno) { 228 if (termno) {
230 vtermno = termno[0]; 229 vtermno = termno[0];
231 ppc_md.udbg_putc = udbg_putcLP; 230 udbg_putc = udbg_putcLP;
232 ppc_md.udbg_getc = udbg_getcLP; 231 udbg_getc = udbg_getcLP;
233 ppc_md.udbg_getc_poll = udbg_getc_pollLP; 232 udbg_getc_poll = udbg_getc_pollLP;
234 found = 1; 233 found = 1;
235 } 234 }
236 } else if (device_is_compatible(stdout_node, "hvterm-protocol")) { 235 } else if (device_is_compatible(stdout_node, "hvterm-protocol")) {
237 termno = (u32 *)get_property(stdout_node, "reg", NULL); 236 termno = (u32 *)get_property(stdout_node, "reg", NULL);
238 if (termno) { 237 if (termno) {
239 vtermno = termno[0]; 238 vtermno = termno[0];
240 ppc_md.udbg_putc = udbg_hvsi_putc; 239 udbg_putc = udbg_hvsi_putc;
241 ppc_md.udbg_getc = udbg_hvsi_getc; 240 udbg_getc = udbg_hvsi_getc;
242 ppc_md.udbg_getc_poll = udbg_hvsi_getc_poll; 241 udbg_getc_poll = udbg_hvsi_getc_poll;
243 found = 1; 242 found = 1;
244 } 243 }
245 } 244 }
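The two hunks above drop the ppc_md.udbg_* indirection in favour of plain global function pointers (udbg_putc, udbg_getc, udbg_getc_poll) that each early-debug backend fills in. A self-contained sketch of that late-bound console pattern, with invented names; it illustrates the idea only and is not the kernel's udbg implementation.

/* Sketch only: early debug output routed through a swappable function pointer. */
#include <stdio.h>

static void (*udbg_putc_sketch)(char c);

static void putc_stdio(char c) { fputc(c, stderr); }

static void udbg_puts_sketch(const char *s)
{
        if (!udbg_putc_sketch)
                return;                 /* no backend registered yet */
        while (*s)
                udbg_putc_sketch(*s++);
}

int main(void)
{
        udbg_puts_sketch("dropped: no backend yet\n"); /* silently ignored */
        udbg_putc_sketch = putc_stdio;                 /* e.g. LPAR or 16550 */
        udbg_puts_sketch("Hello World !\n");
        return 0;
}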
@@ -267,6 +266,10 @@ void vpa_init(int cpu)
267 266
268 /* Register the Virtual Processor Area (VPA) */ 267 /* Register the Virtual Processor Area (VPA) */
269 flags = 1UL << (63 - 18); 268 flags = 1UL << (63 - 18);
269
270 if (cpu_has_feature(CPU_FTR_ALTIVEC))
271 paca[cpu].lppaca.vmxregs_in_use = 1;
272
270 ret = register_vpa(flags, hwcpu, __pa(vpa)); 273 ret = register_vpa(flags, hwcpu, __pa(vpa));
271 274
272 if (ret) 275 if (ret)
@@ -279,7 +282,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
279 unsigned long va, unsigned long prpn, 282 unsigned long va, unsigned long prpn,
280 unsigned long vflags, unsigned long rflags) 283 unsigned long vflags, unsigned long rflags)
281{ 284{
282 unsigned long arpn = physRpn_to_absRpn(prpn);
283 unsigned long lpar_rc; 285 unsigned long lpar_rc;
284 unsigned long flags; 286 unsigned long flags;
285 unsigned long slot; 287 unsigned long slot;
@@ -290,7 +292,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
290 if (vflags & HPTE_V_LARGE) 292 if (vflags & HPTE_V_LARGE)
291 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT); 293 hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
292 294
293 hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 295 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
294 296
295 /* Now fill in the actual HPTE */ 297 /* Now fill in the actual HPTE */
296 /* Set CEC cookie to 0 */ 298 /* Set CEC cookie to 0 */
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/ppc64/kernel/pSeries_reconfig.c
index dc2a69d412a2..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/ppc64/kernel/pSeries_reconfig.c
@@ -111,7 +111,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
111 struct device_node *np; 111 struct device_node *np;
112 int err = -ENOMEM; 112 int err = -ENOMEM;
113 113
114 np = kcalloc(1, sizeof(*np), GFP_KERNEL); 114 np = kzalloc(sizeof(*np), GFP_KERNEL);
115 if (!np) 115 if (!np)
116 goto out_err; 116 goto out_err;
117 117
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 5bec956e44a0..3009701eb90d 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -37,7 +37,7 @@
37#include <linux/ioport.h> 37#include <linux/ioport.h>
38#include <linux/console.h> 38#include <linux/console.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/version.h> 40#include <linux/utsname.h>
41#include <linux/adb.h> 41#include <linux/adb.h>
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
@@ -60,7 +60,8 @@
60#include <asm/nvram.h> 60#include <asm/nvram.h>
61#include <asm/plpar_wrappers.h> 61#include <asm/plpar_wrappers.h>
62#include <asm/xics.h> 62#include <asm/xics.h>
63#include <asm/cputable.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h>
64 65
65#include "i8259.h" 66#include "i8259.h"
66#include "mpic.h" 67#include "mpic.h"
@@ -187,6 +188,21 @@ static void __init pSeries_setup_mpic(void)
187 " MPIC "); 188 " MPIC ");
188} 189}
189 190
191static void pseries_lpar_enable_pmcs(void)
192{
193 unsigned long set, reset;
194
195 power4_enable_pmcs();
196
197 set = 1UL << 63;
198 reset = 0;
199 plpar_hcall_norets(H_PERFMON, set, reset);
200
201 /* instruct hypervisor to maintain PMCs */
202 if (firmware_has_feature(FW_FEATURE_SPLPAR))
203 get_paca()->lppaca.pmcregs_in_use = 1;
204}
205
190static void __init pSeries_setup_arch(void) 206static void __init pSeries_setup_arch(void)
191{ 207{
192 /* Fixup ppc_md depending on the type of interrupt controller */ 208 /* Fixup ppc_md depending on the type of interrupt controller */
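Both the register_vpa() flags word above (1UL << (63 - 18)) and the H_PERFMON set mask in pseries_lpar_enable_pmcs() (1UL << 63) are built this way because the PAPR hypervisor interfaces number bits from the most-significant end of the 64-bit doubleword. A tiny standalone check of that convention; the IBM_BIT64() helper name here is ours, not a kernel macro.

/* Sketch only: MSB-0 ("IBM") bit numbering translated to C shifts. */
#include <assert.h>
#include <stdio.h>

#define IBM_BIT64(n) (1ULL << (63 - (n)))

int main(void)
{
        /* "bit 0" in the architecture documents is the top bit in C terms */
        assert(IBM_BIT64(0) == 0x8000000000000000ULL);
        /* the VPA-registration flag written as 1UL << (63 - 18) above */
        assert(IBM_BIT64(18) == (1ULL << 45));
        printf("flags = %#llx\n", IBM_BIT64(18));
        return 0;
}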
@@ -222,8 +238,8 @@ static void __init pSeries_setup_arch(void)
222 238
223 /* Find and initialize PCI host bridges */ 239 /* Find and initialize PCI host bridges */
224 init_pci_config_tokens(); 240 init_pci_config_tokens();
225 eeh_init();
226 find_and_init_phbs(); 241 find_and_init_phbs();
242 eeh_init();
227 243
228#ifdef CONFIG_DUMMY_CONSOLE 244#ifdef CONFIG_DUMMY_CONSOLE
229 conswitchp = &dummy_con; 245 conswitchp = &dummy_con;
@@ -231,11 +247,9 @@ static void __init pSeries_setup_arch(void)
231 247
232 pSeries_nvram_init(); 248 pSeries_nvram_init();
233 249
234 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
235 vpa_init(boot_cpuid);
236
237 /* Choose an idle loop */ 250 /* Choose an idle loop */
238 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) { 251 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
252 vpa_init(boot_cpuid);
239 if (get_paca()->lppaca.shared_proc) { 253 if (get_paca()->lppaca.shared_proc) {
240 printk(KERN_INFO "Using shared processor idle loop\n"); 254 printk(KERN_INFO "Using shared processor idle loop\n");
241 ppc_md.idle_loop = pseries_shared_idle; 255 ppc_md.idle_loop = pseries_shared_idle;
@@ -247,24 +261,29 @@ static void __init pSeries_setup_arch(void)
247 printk(KERN_INFO "Using default idle loop\n"); 261 printk(KERN_INFO "Using default idle loop\n");
248 ppc_md.idle_loop = default_idle; 262 ppc_md.idle_loop = default_idle;
249 } 263 }
264
265 if (systemcfg->platform & PLATFORM_LPAR)
266 ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
267 else
268 ppc_md.enable_pmcs = power4_enable_pmcs;
250} 269}
251 270
252static int __init pSeries_init_panel(void) 271static int __init pSeries_init_panel(void)
253{ 272{
254 /* Manually leave the kernel version on the panel. */ 273 /* Manually leave the kernel version on the panel. */
255 ppc_md.progress("Linux ppc64\n", 0); 274 ppc_md.progress("Linux ppc64\n", 0);
256 ppc_md.progress(UTS_RELEASE, 0); 275 ppc_md.progress(system_utsname.version, 0);
257 276
258 return 0; 277 return 0;
259} 278}
260arch_initcall(pSeries_init_panel); 279arch_initcall(pSeries_init_panel);
261 280
262 281
263/* Build up the firmware_features bitmask field 282/* Build up the ppc64_firmware_features bitmask field
264 * using contents of device-tree/ibm,hypertas-functions. 283 * using contents of device-tree/ibm,hypertas-functions.
265 * Ultimately this functionality may be moved into prom.c prom_init(). 284 * Ultimately this functionality may be moved into prom.c prom_init().
266 */ 285 */
267void __init fw_feature_init(void) 286static void __init fw_feature_init(void)
268{ 287{
269 struct device_node * dn; 288 struct device_node * dn;
270 char * hypertas; 289 char * hypertas;
@@ -272,7 +291,7 @@ void __init fw_feature_init(void)
272 291
273 DBG(" -> fw_feature_init()\n"); 292 DBG(" -> fw_feature_init()\n");
274 293
275 cur_cpu_spec->firmware_features = 0; 294 ppc64_firmware_features = 0;
276 dn = of_find_node_by_path("/rtas"); 295 dn = of_find_node_by_path("/rtas");
277 if (dn == NULL) { 296 if (dn == NULL) {
278 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n"); 297 printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n");
@@ -288,7 +307,7 @@ void __init fw_feature_init(void)
288 if ((firmware_features_table[i].name) && 307 if ((firmware_features_table[i].name) &&
289 (strcmp(firmware_features_table[i].name,hypertas))==0) { 308 (strcmp(firmware_features_table[i].name,hypertas))==0) {
290 /* we have a match */ 309 /* we have a match */
291 cur_cpu_spec->firmware_features |= 310 ppc64_firmware_features |=
292 (firmware_features_table[i].val); 311 (firmware_features_table[i].val);
293 break; 312 break;
294 } 313 }
@@ -302,7 +321,7 @@ void __init fw_feature_init(void)
302 of_node_put(dn); 321 of_node_put(dn);
303 no_rtas: 322 no_rtas:
304 printk(KERN_INFO "firmware_features = 0x%lx\n", 323 printk(KERN_INFO "firmware_features = 0x%lx\n",
305 cur_cpu_spec->firmware_features); 324 ppc64_firmware_features);
306 325
307 DBG(" <- fw_feature_init()\n"); 326 DBG(" <- fw_feature_init()\n");
308} 327}
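fw_feature_init() as reworked above walks the strings in /rtas/ibm,hypertas-functions and ORs table-matched bits into the global ppc64_firmware_features mask, which firmware_has_feature() presumably just tests. A compilable sketch of that string-table match; the feature names and bit values below are made up for illustration.

/* Sketch only: build a feature mask by matching firmware strings against a table. */
#include <stdio.h>
#include <string.h>

#define FW_FEAT_A (1UL << 0)
#define FW_FEAT_B (1UL << 1)

static const struct { const char *name; unsigned long val; } fw_table[] = {
        { "hcall-feature-a", FW_FEAT_A },
        { "hcall-feature-b", FW_FEAT_B },
};

static unsigned long fw_features;

static int fw_has(unsigned long f) { return (fw_features & f) != 0; }

int main(void)
{
        /* stand-in for the property's NUL-separated string list */
        const char *hypertas[] = { "hcall-feature-b", "hcall-unknown" };
        size_t i, j;

        for (i = 0; i < sizeof(hypertas) / sizeof(hypertas[0]); i++)
                for (j = 0; j < sizeof(fw_table) / sizeof(fw_table[0]); j++)
                        if (strcmp(fw_table[j].name, hypertas[i]) == 0) {
                                fw_features |= fw_table[j].val;
                                break;
                        }

        printf("firmware_features = 0x%lx, has A: %d, has B: %d\n",
               fw_features, fw_has(FW_FEAT_A), fw_has(FW_FEAT_B));
        return 0;
}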
@@ -378,9 +397,6 @@ static void __init pSeries_init_early(void)
378 comport = (void *)ioremap(physport, 16); 397 comport = (void *)ioremap(physport, 16);
379 udbg_init_uart(comport, default_speed); 398 udbg_init_uart(comport, default_speed);
380 399
381 ppc_md.udbg_putc = udbg_putc;
382 ppc_md.udbg_getc = udbg_getc;
383 ppc_md.udbg_getc_poll = udbg_getc_poll;
384 DBG("Hello World !\n"); 400 DBG("Hello World !\n");
385 } 401 }
386 402
@@ -574,6 +590,13 @@ static int pseries_shared_idle(void)
574 return 0; 590 return 0;
575} 591}
576 592
593static int pSeries_pci_probe_mode(struct pci_bus *bus)
594{
595 if (systemcfg->platform & PLATFORM_LPAR)
596 return PCI_PROBE_DEVTREE;
597 return PCI_PROBE_NORMAL;
598}
599
577struct machdep_calls __initdata pSeries_md = { 600struct machdep_calls __initdata pSeries_md = {
578 .probe = pSeries_probe, 601 .probe = pSeries_probe,
579 .setup_arch = pSeries_setup_arch, 602 .setup_arch = pSeries_setup_arch,
@@ -581,6 +604,7 @@ struct machdep_calls __initdata pSeries_md = {
581 .get_cpuinfo = pSeries_get_cpuinfo, 604 .get_cpuinfo = pSeries_get_cpuinfo,
582 .log_error = pSeries_log_error, 605 .log_error = pSeries_log_error,
583 .pcibios_fixup = pSeries_final_fixup, 606 .pcibios_fixup = pSeries_final_fixup,
607 .pci_probe_mode = pSeries_pci_probe_mode,
584 .irq_bus_setup = pSeries_irq_bus_setup, 608 .irq_bus_setup = pSeries_irq_bus_setup,
585 .restart = rtas_restart, 609 .restart = rtas_restart,
586 .power_off = rtas_power_off, 610 .power_off = rtas_power_off,
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c
index 62c55a123560..d2c7e2c4733b 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/ppc64/kernel/pSeries_smp.c
@@ -41,6 +41,7 @@
41#include <asm/machdep.h> 41#include <asm/machdep.h>
42#include <asm/xics.h> 42#include <asm/xics.h>
43#include <asm/cputable.h> 43#include <asm/cputable.h>
44#include <asm/firmware.h>
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/rtas.h> 46#include <asm/rtas.h>
46#include <asm/plpar_wrappers.h> 47#include <asm/plpar_wrappers.h>
@@ -271,6 +272,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
271 unsigned long start_here = __pa((u32)*((unsigned long *) 272 unsigned long start_here = __pa((u32)*((unsigned long *)
272 pSeries_secondary_smp_init)); 273 pSeries_secondary_smp_init));
273 unsigned int pcpu; 274 unsigned int pcpu;
275 int start_cpu;
274 276
275 if (cpu_isset(lcpu, of_spin_map)) 277 if (cpu_isset(lcpu, of_spin_map))
276 /* Already started by OF and sitting in spin loop */ 278 /* Already started by OF and sitting in spin loop */
@@ -281,12 +283,20 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
281 /* Fixup atomic count: it exited inside IRQ handler. */ 283 /* Fixup atomic count: it exited inside IRQ handler. */
282 paca[lcpu].__current->thread_info->preempt_count = 0; 284 paca[lcpu].__current->thread_info->preempt_count = 0;
283 285
284 status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL, 286 /*
285 pcpu, start_here, lcpu); 287 * If the RTAS start-cpu token does not exist then presume the
288 * cpu is already spinning.
289 */
290 start_cpu = rtas_token("start-cpu");
291 if (start_cpu == RTAS_UNKNOWN_SERVICE)
292 return 1;
293
294 status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
286 if (status != 0) { 295 if (status != 0) {
287 printk(KERN_ERR "start-cpu failed: %i\n", status); 296 printk(KERN_ERR "start-cpu failed: %i\n", status);
288 return 0; 297 return 0;
289 } 298 }
299
290 return 1; 300 return 1;
291} 301}
292 302
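The smp_startup_cpu() change above looks the "start-cpu" RTAS token up first and treats a missing service as "CPU already spinning" rather than as an error. A standalone sketch of that query-before-call guard; apart from RTAS_UNKNOWN_SERVICE being -1, the function names and token value are invented.

/* Sketch only: check an RTAS service token before attempting the call. */
#include <stdio.h>
#include <string.h>

#define RTAS_UNKNOWN_SERVICE (-1)

/* pretend firmware that only implements "start-cpu" */
static int fake_rtas_token(const char *service)
{
        return strcmp(service, "start-cpu") == 0 ? 0x2001 : RTAS_UNKNOWN_SERVICE;
}

static int start_one_cpu(const char *service)
{
        int token = fake_rtas_token(service);

        if (token == RTAS_UNKNOWN_SERVICE)
                return 1;       /* presume the cpu is already spinning */

        printf("calling %s with token %#x\n", service, token);
        return 1;               /* a real caller would check the rtas_call() status */
}

int main(void)
{
        start_one_cpu("start-cpu");
        start_one_cpu("stop-self");
        return 0;
}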
@@ -326,7 +336,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
326 if (cpu != boot_cpuid) 336 if (cpu != boot_cpuid)
327 xics_setup_cpu(); 337 xics_setup_cpu();
328 338
329 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) 339 if (firmware_has_feature(FW_FEATURE_SPLPAR))
330 vpa_init(cpu); 340 vpa_init(cpu);
331 341
332 cpu_clear(cpu, of_spin_map); 342 cpu_clear(cpu, of_spin_map);
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/ppc64/kernel/pSeries_vio.c
new file mode 100644
index 000000000000..e0ae06f58f86
--- /dev/null
+++ b/arch/ppc64/kernel/pSeries_vio.c
@@ -0,0 +1,273 @@
1/*
2 * IBM PowerPC pSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/mm.h>
19#include <linux/kobject.h>
20#include <asm/iommu.h>
21#include <asm/dma.h>
22#include <asm/prom.h>
23#include <asm/vio.h>
24#include <asm/hvcall.h>
25
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27
28static void probe_bus_pseries(void)
29{
30 struct device_node *node_vroot, *of_node;
31
32 node_vroot = find_devices("vdevice");
33 if ((node_vroot == NULL) || (node_vroot->child == NULL))
34 /* this machine doesn't do virtual IO, and that's ok */
35 return;
36
37 /*
38 * Create struct vio_devices for each virtual device in the device tree.
39 * Drivers will associate with them later.
40 */
41 for (of_node = node_vroot->child; of_node != NULL;
42 of_node = of_node->sibling) {
43 printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
44 vio_register_device_node(of_node);
45 }
46}
47
48/**
49 * vio_match_device_pseries: - Tell if a pSeries VIO device matches a
50 * vio_device_id
51 */
52static int vio_match_device_pseries(const struct vio_device_id *id,
53 const struct vio_dev *dev)
54{
55 return (strncmp(dev->type, id->type, strlen(id->type)) == 0) &&
56 device_is_compatible(dev->dev.platform_data, id->compat);
57}
58
59static void vio_release_device_pseries(struct device *dev)
60{
61 /* XXX free TCE table */
62 of_node_put(dev->platform_data);
63}
64
65static ssize_t viodev_show_devspec(struct device *dev,
66 struct device_attribute *attr, char *buf)
67{
68 struct device_node *of_node = dev->platform_data;
69
70 return sprintf(buf, "%s\n", of_node->full_name);
71}
72DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
73
74static void vio_unregister_device_pseries(struct vio_dev *viodev)
75{
76 device_remove_file(&viodev->dev, &dev_attr_devspec);
77}
78
79static struct vio_bus_ops vio_bus_ops_pseries = {
80 .match = vio_match_device_pseries,
81 .unregister_device = vio_unregister_device_pseries,
82 .release_device = vio_release_device_pseries,
83};
84
85/**
86 * vio_bus_init_pseries: - Initialize the pSeries virtual IO bus
87 */
88static int __init vio_bus_init_pseries(void)
89{
90 int err;
91
92 err = vio_bus_init(&vio_bus_ops_pseries);
93 if (err == 0)
94 probe_bus_pseries();
95 return err;
96}
97
98__initcall(vio_bus_init_pseries);
99
100/**
101 * vio_build_iommu_table: - gets the dma information from OF and
102 * builds the TCE tree.
103 * @dev: the virtual device.
104 *
105 * Returns a pointer to the built tce tree, or NULL if it can't
106 * find property.
107*/
108static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
109{
110 unsigned int *dma_window;
111 struct iommu_table *newTceTable;
112 unsigned long offset;
113 int dma_window_property_size;
114
115 dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
116 if(!dma_window) {
117 return NULL;
118 }
119
120 newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
121
122 /* There should be some code to extract the phys-encoded offset
123 using prom_n_addr_cells(). However, according to a comment
124 on earlier versions, it's always zero, so we don't bother */
125 offset = dma_window[1] >> PAGE_SHIFT;
126
127 /* TCE table size - measured in tce entries */
128 newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
129 /* offset for VIO should always be 0 */
130 newTceTable->it_offset = offset;
131 newTceTable->it_busno = 0;
132 newTceTable->it_index = (unsigned long)dma_window[0];
133 newTceTable->it_type = TCE_VB;
134
135 return iommu_init_table(newTceTable);
136}
137
138/**
139 * vio_register_device_node: - Register a new vio device.
140 * @of_node: The OF node for this device.
141 *
142 * Creates and initializes a vio_dev structure from the data in
143 * of_node (dev.platform_data) and adds it to the list of virtual devices.
144 * Returns a pointer to the created vio_dev or NULL if node has
145 * NULL device_type or compatible fields.
146 */
147struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
148{
149 struct vio_dev *viodev;
150 unsigned int *unit_address;
151 unsigned int *irq_p;
152
153 /* we need the 'device_type' property, in order to match with drivers */
154 if ((NULL == of_node->type)) {
155 printk(KERN_WARNING
156 "%s: node %s missing 'device_type'\n", __FUNCTION__,
157 of_node->name ? of_node->name : "<unknown>");
158 return NULL;
159 }
160
161 unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
162 if (!unit_address) {
163 printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
164 of_node->name ? of_node->name : "<unknown>");
165 return NULL;
166 }
167
168 /* allocate a vio_dev for this node */
169 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
170 if (!viodev) {
171 return NULL;
172 }
173 memset(viodev, 0, sizeof(struct vio_dev));
174
175 viodev->dev.platform_data = of_node_get(of_node);
176
177 viodev->irq = NO_IRQ;
178 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
179 if (irq_p) {
180 int virq = virt_irq_create_mapping(*irq_p);
181 if (virq == NO_IRQ) {
182 printk(KERN_ERR "Unable to allocate interrupt "
183 "number for %s\n", of_node->full_name);
184 } else
185 viodev->irq = irq_offset_up(virq);
186 }
187
188 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
189 viodev->name = of_node->name;
190 viodev->type = of_node->type;
191 viodev->unit_address = *unit_address;
192 viodev->iommu_table = vio_build_iommu_table(viodev);
193
194 /* register with generic device framework */
195 if (vio_register_device(viodev) == NULL) {
196 /* XXX free TCE table */
197 kfree(viodev);
198 return NULL;
199 }
200 device_create_file(&viodev->dev, &dev_attr_devspec);
201
202 return viodev;
203}
204EXPORT_SYMBOL(vio_register_device_node);
205
206/**
207 * vio_get_attribute: - get attribute for virtual device
208 * @vdev: The vio device to get property.
209 * @which: The property/attribute to be extracted.
210 * @length: Pointer to length of returned data size (unused if NULL).
211 *
212 * Calls prom.c's get_property() to return the value of the
213 * attribute specified by the preprocessor constant @which
214*/
215const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
216{
217 return get_property(vdev->dev.platform_data, (char*)which, length);
218}
219EXPORT_SYMBOL(vio_get_attribute);
220
221/* vio_find_name() - internal because only vio.c knows how we formatted the
222 * kobject name
223 * XXX once vio_bus_type.devices is actually used as a kset in
224 * drivers/base/bus.c, this function should be removed in favor of
225 * "device_find(kobj_name, &vio_bus_type)"
226 */
227static struct vio_dev *vio_find_name(const char *kobj_name)
228{
229 struct kobject *found;
230
231 found = kset_find_obj(&devices_subsys.kset, kobj_name);
232 if (!found)
233 return NULL;
234
235 return to_vio_dev(container_of(found, struct device, kobj));
236}
237
238/**
239 * vio_find_node - find an already-registered vio_dev
240 * @vnode: device_node of the virtual device we're looking for
241 */
242struct vio_dev *vio_find_node(struct device_node *vnode)
243{
244 uint32_t *unit_address;
245 char kobj_name[BUS_ID_SIZE];
246
247 /* construct the kobject name from the device node */
248 unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
249 if (!unit_address)
250 return NULL;
251 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
252
253 return vio_find_name(kobj_name);
254}
255EXPORT_SYMBOL(vio_find_node);
256
257int vio_enable_interrupts(struct vio_dev *dev)
258{
259 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
260 if (rc != H_Success)
261 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
262 return rc;
263}
264EXPORT_SYMBOL(vio_enable_interrupts);
265
266int vio_disable_interrupts(struct vio_dev *dev)
267{
268 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
269 if (rc != H_Success)
270 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
271 return rc;
272}
273EXPORT_SYMBOL(vio_disable_interrupts);
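vio_build_iommu_table() above reads cells 0, 1 and 4 of the ibm,my-dma-window property, which suggests a five-cell layout of LIOBN, two address cells and two size cells. A small sketch that decodes such a property under that assumption; the sample cell values and the SKETCH_PAGE_SHIFT constant are invented for illustration.

/* Sketch only: decode an assumed five-cell DMA-window property. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SHIFT 12

int main(void)
{
        /* cells as they would come from the device tree:
         * liobn, addr hi, addr lo, size hi, size lo */
        uint32_t dma_window[5] = { 0x10000000, 0, 0, 0, 0x10000000 };

        unsigned long liobn  = dma_window[0];
        unsigned long offset = dma_window[1] >> SKETCH_PAGE_SHIFT;
        unsigned long size   = dma_window[4] >> SKETCH_PAGE_SHIFT; /* in TCEs */

        printf("liobn %#lx, offset %lu, %lu TCE entries (%lu MB window)\n",
               liobn, offset, size,
               (size << SKETCH_PAGE_SHIFT) >> 20);
        return 0;
}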
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index 6316188737b6..33a2d8db3f21 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -59,6 +59,7 @@ extern unsigned long __toc_start;
59 .fpregs_in_use = 1, \ 59 .fpregs_in_use = 1, \
60 .end_of_quantum = 0xfffffffffffffffful, \ 60 .end_of_quantum = 0xfffffffffffffffful, \
61 .slb_count = 64, \ 61 .slb_count = 64, \
62 .vmxregs_in_use = 0, \
62 }, \ 63 }, \
63 64
64#ifdef CONFIG_PPC_ISERIES 65#ifdef CONFIG_PPC_ISERIES
@@ -78,7 +79,7 @@ extern unsigned long __toc_start;
78 79
79#define BOOTCPU_PACA_INIT(number) \ 80#define BOOTCPU_PACA_INIT(number) \
80{ \ 81{ \
81 PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \ 82 PACA_INIT_COMMON(number, 1, 0, (u64)&initial_stab) \
82 PACA_INIT_ISERIES(number) \ 83 PACA_INIT_ISERIES(number) \
83} 84}
84 85
@@ -90,7 +91,7 @@ extern unsigned long __toc_start;
90 91
91#define BOOTCPU_PACA_INIT(number) \ 92#define BOOTCPU_PACA_INIT(number) \
92{ \ 93{ \
93 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \ 94 PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, (u64)&initial_stab) \
94} 95}
95#endif 96#endif
96 97
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index d0d55c7908ef..861138ad092c 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -21,6 +21,7 @@
21#include <linux/bootmem.h> 21#include <linux/bootmem.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/syscalls.h>
24 25
25#include <asm/processor.h> 26#include <asm/processor.h>
26#include <asm/io.h> 27#include <asm/io.h>
@@ -50,6 +51,10 @@ unsigned long io_page_mask;
50 51
51EXPORT_SYMBOL(io_page_mask); 52EXPORT_SYMBOL(io_page_mask);
52 53
54#ifdef CONFIG_PPC_MULTIPLATFORM
55static void fixup_resource(struct resource *res, struct pci_dev *dev);
56static void do_bus_setup(struct pci_bus *bus);
57#endif
53 58
54unsigned int pcibios_assign_all_busses(void) 59unsigned int pcibios_assign_all_busses(void)
55{ 60{
@@ -84,7 +89,6 @@ static void fixup_broken_pcnet32(struct pci_dev* dev)
84 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { 89 if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
85 dev->vendor = PCI_VENDOR_ID_AMD; 90 dev->vendor = PCI_VENDOR_ID_AMD;
86 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); 91 pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
87 pci_name_device(dev);
88 } 92 }
89} 93}
90DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); 94DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
@@ -225,10 +229,287 @@ static void __init pcibios_claim_of_setup(void)
225} 229}
226#endif 230#endif
227 231
232#ifdef CONFIG_PPC_MULTIPLATFORM
233static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
234{
235 u32 *prop;
236 int len;
237
238 prop = (u32 *) get_property(np, name, &len);
239 if (prop && len >= 4)
240 return *prop;
241 return def;
242}
243
244static unsigned int pci_parse_of_flags(u32 addr0)
245{
246 unsigned int flags = 0;
247
248 if (addr0 & 0x02000000) {
249 flags |= IORESOURCE_MEM;
250 if (addr0 & 0x40000000)
251 flags |= IORESOURCE_PREFETCH;
252 } else if (addr0 & 0x01000000)
253 flags |= IORESOURCE_IO;
254 return flags;
255}
256
257#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
258
259static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
260{
261 u64 base, size;
262 unsigned int flags;
263 struct resource *res;
264 u32 *addrs, i;
265 int proplen;
266
267 addrs = (u32 *) get_property(node, "assigned-addresses", &proplen);
268 if (!addrs)
269 return;
270 for (; proplen >= 20; proplen -= 20, addrs += 5) {
271 flags = pci_parse_of_flags(addrs[0]);
272 if (!flags)
273 continue;
274 base = GET_64BIT(addrs, 1);
275 size = GET_64BIT(addrs, 3);
276 if (!size)
277 continue;
278 i = addrs[0] & 0xff;
279 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
280 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
281 } else if (i == dev->rom_base_reg) {
282 res = &dev->resource[PCI_ROM_RESOURCE];
283 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
284 } else {
285 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
286 continue;
287 }
288 res->start = base;
289 res->end = base + size - 1;
290 res->flags = flags;
291 res->name = pci_name(dev);
292 fixup_resource(res, dev);
293 }
294}
295
296static struct pci_dev *of_create_pci_dev(struct device_node *node,
297 struct pci_bus *bus, int devfn)
298{
299 struct pci_dev *dev;
300 const char *type;
301
302 dev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL);
303 if (!dev)
304 return NULL;
305 type = get_property(node, "device_type", NULL);
306 if (type == NULL)
307 type = "";
308
309 memset(dev, 0, sizeof(struct pci_dev));
310 dev->bus = bus;
311 dev->sysdata = node;
312 dev->dev.parent = bus->bridge;
313 dev->dev.bus = &pci_bus_type;
314 dev->devfn = devfn;
315 dev->multifunction = 0; /* maybe a lie? */
316
317 dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
318 dev->device = get_int_prop(node, "device-id", 0xffff);
319 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
320 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
321
322 dev->cfg_size = 256; /*pci_cfg_space_size(dev);*/
323
324 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
325 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
326 dev->class = get_int_prop(node, "class-code", 0);
327
328 dev->current_state = 4; /* unknown power state */
329
330 if (!strcmp(type, "pci")) {
331 /* a PCI-PCI bridge */
332 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
333 dev->rom_base_reg = PCI_ROM_ADDRESS1;
334 } else if (!strcmp(type, "cardbus")) {
335 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
336 } else {
337 dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
338 dev->rom_base_reg = PCI_ROM_ADDRESS;
339 dev->irq = NO_IRQ;
340 if (node->n_intrs > 0) {
341 dev->irq = node->intrs[0].line;
342 pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
343 dev->irq);
344 }
345 }
346
347 pci_parse_of_addrs(node, dev);
348
349 pci_device_add(dev, bus);
350
351 /* XXX pci_scan_msi_device(dev); */
352
353 return dev;
354}
355
356static void of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev);
357
358static void __devinit of_scan_bus(struct device_node *node,
359 struct pci_bus *bus)
360{
361 struct device_node *child = NULL;
362 u32 *reg;
363 int reglen, devfn;
364 struct pci_dev *dev;
365
366 while ((child = of_get_next_child(node, child)) != NULL) {
367 reg = (u32 *) get_property(child, "reg", &reglen);
368 if (reg == NULL || reglen < 20)
369 continue;
370 devfn = (reg[0] >> 8) & 0xff;
371 /* create a new pci_dev for this device */
372 dev = of_create_pci_dev(child, bus, devfn);
373 if (!dev)
374 continue;
375 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
376 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
377 of_scan_pci_bridge(child, dev);
378 }
379
380 do_bus_setup(bus);
381}
382
383static void __devinit of_scan_pci_bridge(struct device_node *node,
384 struct pci_dev *dev)
385{
386 struct pci_bus *bus;
387 u32 *busrange, *ranges;
388 int len, i, mode;
389 struct resource *res;
390 unsigned int flags;
391 u64 size;
392
393 /* parse bus-range property */
394 busrange = (u32 *) get_property(node, "bus-range", &len);
395 if (busrange == NULL || len != 8) {
396 printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n",
397 node->full_name);
398 return;
399 }
400 ranges = (u32 *) get_property(node, "ranges", &len);
401 if (ranges == NULL) {
402 printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n",
403 node->full_name);
404 return;
405 }
406
407 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
408 if (!bus) {
409 printk(KERN_ERR "Failed to create pci bus for %s\n",
410 node->full_name);
411 return;
412 }
413
414 bus->primary = dev->bus->number;
415 bus->subordinate = busrange[1];
416 bus->bridge_ctl = 0;
417 bus->sysdata = node;
418
419 /* parse ranges property */
420 /* PCI #address-cells == 3 and #size-cells == 2 always */
421 res = &dev->resource[PCI_BRIDGE_RESOURCES];
422 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
423 res->flags = 0;
424 bus->resource[i] = res;
425 ++res;
426 }
427 i = 1;
428 for (; len >= 32; len -= 32, ranges += 8) {
429 flags = pci_parse_of_flags(ranges[0]);
430 size = GET_64BIT(ranges, 6);
431 if (flags == 0 || size == 0)
432 continue;
433 if (flags & IORESOURCE_IO) {
434 res = bus->resource[0];
435 if (res->flags) {
436 printk(KERN_ERR "PCI: ignoring extra I/O range"
437 " for bridge %s\n", node->full_name);
438 continue;
439 }
440 } else {
441 if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
442 printk(KERN_ERR "PCI: too many memory ranges"
443 " for bridge %s\n", node->full_name);
444 continue;
445 }
446 res = bus->resource[i];
447 ++i;
448 }
449 res->start = GET_64BIT(ranges, 1);
450 res->end = res->start + size - 1;
451 res->flags = flags;
452 fixup_resource(res, dev);
453 }
454 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
455 bus->number);
456
457 mode = PCI_PROBE_NORMAL;
458 if (ppc_md.pci_probe_mode)
459 mode = ppc_md.pci_probe_mode(bus);
460 if (mode == PCI_PROBE_DEVTREE)
461 of_scan_bus(node, bus);
462 else if (mode == PCI_PROBE_NORMAL)
463 pci_scan_child_bus(bus);
464}
465#endif /* CONFIG_PPC_MULTIPLATFORM */
466
467static void __devinit scan_phb(struct pci_controller *hose)
468{
469 struct pci_bus *bus;
470 struct device_node *node = hose->arch_data;
471 int i, mode;
472 struct resource *res;
473
474 bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node);
475 if (bus == NULL) {
476 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
477 hose->global_number);
478 return;
479 }
480 bus->secondary = hose->first_busno;
481 hose->bus = bus;
482
483 bus->resource[0] = res = &hose->io_resource;
484 if (res->flags && request_resource(&ioport_resource, res))
485 printk(KERN_ERR "Failed to request PCI IO region "
486 "on PCI domain %04x\n", hose->global_number);
487
488 for (i = 0; i < 3; ++i) {
489 res = &hose->mem_resources[i];
490 bus->resource[i+1] = res;
491 if (res->flags && request_resource(&iomem_resource, res))
492 printk(KERN_ERR "Failed to request PCI memory region "
493 "on PCI domain %04x\n", hose->global_number);
494 }
495
496 mode = PCI_PROBE_NORMAL;
497#ifdef CONFIG_PPC_MULTIPLATFORM
498 if (ppc_md.pci_probe_mode)
499 mode = ppc_md.pci_probe_mode(bus);
500 if (mode == PCI_PROBE_DEVTREE) {
501 bus->subordinate = hose->last_busno;
502 of_scan_bus(node, bus);
503 }
504#endif /* CONFIG_PPC_MULTIPLATFORM */
505 if (mode == PCI_PROBE_NORMAL)
506 hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
507 pci_bus_add_devices(bus);
508}
509
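The OF-based scan added above leans on two small decoders: GET_64BIT() glues a pair of 32-bit device-tree cells into one 64-bit value, and pci_parse_of_flags() classifies the address space from the top bits of the first address cell. A standalone sketch of the same decoding applied to one invented assigned-addresses entry:

/* Sketch only: decode one OF "assigned-addresses" entry the way the scan does. */
#include <stdio.h>
#include <stdint.h>

#define GET_64BIT(prop, i) ((((uint64_t)(prop)[(i)]) << 32) | (prop)[(i) + 1])

int main(void)
{
        /* one entry: phys.hi, addr hi, addr lo, size hi, size lo (made up) */
        uint32_t cells[5] = { 0x82000010, 0x0000000f, 0x80000000, 0x0, 0x100000 };

        uint64_t base = GET_64BIT(cells, 1);
        uint64_t size = GET_64BIT(cells, 3);

        printf("%s space, base %#llx, size %#llx\n",
               (cells[0] & 0x02000000) ? "memory" :
               (cells[0] & 0x01000000) ? "I/O" : "config",
               (unsigned long long)base, (unsigned long long)size);
        return 0;
}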
228static int __init pcibios_init(void) 510static int __init pcibios_init(void)
229{ 511{
230 struct pci_controller *hose, *tmp; 512 struct pci_controller *hose, *tmp;
231 struct pci_bus *bus;
232 513
233 /* For now, override phys_mem_access_prot. If we need it, 514 /* For now, override phys_mem_access_prot. If we need it,
234 * later, we may move that initialization to each ppc_md 515 * later, we may move that initialization to each ppc_md
@@ -242,13 +523,8 @@ static int __init pcibios_init(void)
242 printk("PCI: Probing PCI hardware\n"); 523 printk("PCI: Probing PCI hardware\n");
243 524
244 /* Scan all of the recorded PCI controllers. */ 525 /* Scan all of the recorded PCI controllers. */
245 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 526 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
246 hose->last_busno = 0xff; 527 scan_phb(hose);
247 bus = pci_scan_bus(hose->first_busno, hose->ops,
248 hose->arch_data);
249 hose->bus = bus;
250 hose->last_busno = bus->subordinate;
251 }
252 528
253#ifndef CONFIG_PPC_ISERIES 529#ifndef CONFIG_PPC_ISERIES
254 if (pci_probe_only) 530 if (pci_probe_only)
@@ -820,118 +1096,89 @@ void phbs_remap_io(void)
820/* 1096/*
821 * ppc64 can have multifunction devices that do not respond to function 0. 1097 * ppc64 can have multifunction devices that do not respond to function 0.
822 * In this case we must scan all functions. 1098 * In this case we must scan all functions.
1099 * XXX this can go now, we use the OF device tree in all the
1100 * cases that caused problems. -- paulus
823 */ 1101 */
824int pcibios_scan_all_fns(struct pci_bus *bus, int devfn) 1102int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
825{ 1103{
826 struct device_node *busdn, *dn; 1104 return 0;
1105}
1106
1107static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
1108{
1109 struct pci_controller *hose = pci_bus_to_host(dev->bus);
1110 unsigned long start, end, mask, offset;
827 1111
828 if (bus->self) 1112 if (res->flags & IORESOURCE_IO) {
829 busdn = pci_device_to_OF_node(bus->self); 1113 offset = (unsigned long)hose->io_base_virt - pci_io_base;
830 else
831 busdn = bus->sysdata; /* must be a phb */
832 1114
833 if (busdn == NULL) 1115 start = res->start += offset;
834 return 0; 1116 end = res->end += offset;
835 1117
836 /* 1118 /* Need to allow IO access to pages that are in the
837 * Check to see if there is any of the 8 functions are in the 1119 ISA range */
838 * device tree. If they are then we need to scan all the 1120 if (start < MAX_ISA_PORT) {
839 * functions of this slot. 1121 if (end > MAX_ISA_PORT)
840 */ 1122 end = MAX_ISA_PORT;
841 for (dn = busdn->child; dn; dn = dn->sibling)
842 if ((dn->devfn >> 3) == (devfn >> 3))
843 return 1;
844 1123
845 return 0; 1124 start >>= PAGE_SHIFT;
846} 1125 end >>= PAGE_SHIFT;
847 1126
1127 /* get the range of pages for the map */
1128 mask = ((1 << (end+1)) - 1) ^ ((1 << start) - 1);
1129 io_page_mask |= mask;
1130 }
1131 } else if (res->flags & IORESOURCE_MEM) {
1132 res->start += hose->pci_mem_offset;
1133 res->end += hose->pci_mem_offset;
1134 }
1135}
848 1136
849void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, 1137void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
850 struct pci_bus *bus) 1138 struct pci_bus *bus)
851{ 1139{
852 /* Update device resources. */ 1140 /* Update device resources. */
853 struct pci_controller *hose = pci_bus_to_host(bus);
854 int i; 1141 int i;
855 1142
856 for (i = 0; i < PCI_NUM_RESOURCES; i++) { 1143 for (i = 0; i < PCI_NUM_RESOURCES; i++)
857 if (dev->resource[i].flags & IORESOURCE_IO) { 1144 if (dev->resource[i].flags)
858 unsigned long offset = (unsigned long)hose->io_base_virt 1145 fixup_resource(&dev->resource[i], dev);
859 - pci_io_base;
860 unsigned long start, end, mask;
861
862 start = dev->resource[i].start += offset;
863 end = dev->resource[i].end += offset;
864
865 /* Need to allow IO access to pages that are in the
866 ISA range */
867 if (start < MAX_ISA_PORT) {
868 if (end > MAX_ISA_PORT)
869 end = MAX_ISA_PORT;
870
871 start >>= PAGE_SHIFT;
872 end >>= PAGE_SHIFT;
873
874 /* get the range of pages for the map */
875 mask = ((1 << (end+1))-1) ^ ((1 << start)-1);
876 io_page_mask |= mask;
877 }
878 }
879 else if (dev->resource[i].flags & IORESOURCE_MEM) {
880 dev->resource[i].start += hose->pci_mem_offset;
881 dev->resource[i].end += hose->pci_mem_offset;
882 }
883 }
884} 1146}
885EXPORT_SYMBOL(pcibios_fixup_device_resources); 1147EXPORT_SYMBOL(pcibios_fixup_device_resources);
886 1148
887void __devinit pcibios_fixup_bus(struct pci_bus *bus) 1149static void __devinit do_bus_setup(struct pci_bus *bus)
888{ 1150{
889 struct pci_controller *hose = pci_bus_to_host(bus); 1151 struct pci_dev *dev;
890 struct pci_dev *dev = bus->self;
891 struct resource *res;
892 int i;
893 1152
894 if (!dev) { 1153 ppc_md.iommu_bus_setup(bus);
895 /* Root bus. */
896 1154
897 hose->bus = bus; 1155 list_for_each_entry(dev, &bus->devices, bus_list)
898 bus->resource[0] = res = &hose->io_resource; 1156 ppc_md.iommu_dev_setup(dev);
899 1157
900 if (res->flags && request_resource(&ioport_resource, res)) 1158 if (ppc_md.irq_bus_setup)
901 printk(KERN_ERR "Failed to request IO on " 1159 ppc_md.irq_bus_setup(bus);
902 "PCI domain %d\n", pci_domain_nr(bus)); 1160}
903 1161
904 for (i = 0; i < 3; ++i) { 1162void __devinit pcibios_fixup_bus(struct pci_bus *bus)
905 res = &hose->mem_resources[i]; 1163{
906 bus->resource[i+1] = res; 1164 struct pci_dev *dev = bus->self;
907 if (res->flags && request_resource(&iomem_resource, res)) 1165
908 printk(KERN_ERR "Failed to request MEM on " 1166 if (dev && pci_probe_only &&
909 "PCI domain %d\n", 1167 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
910 pci_domain_nr(bus));
911 }
912 } else if (pci_probe_only &&
913 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
914 /* This is a subordinate bridge */ 1168 /* This is a subordinate bridge */
915 1169
916 pci_read_bridge_bases(bus); 1170 pci_read_bridge_bases(bus);
917 pcibios_fixup_device_resources(dev, bus); 1171 pcibios_fixup_device_resources(dev, bus);
918 } 1172 }
919 1173
920 ppc_md.iommu_bus_setup(bus); 1174 do_bus_setup(bus);
921
922 list_for_each_entry(dev, &bus->devices, bus_list)
923 ppc_md.iommu_dev_setup(dev);
924
925 if (ppc_md.irq_bus_setup)
926 ppc_md.irq_bus_setup(bus);
927 1175
928 if (!pci_probe_only) 1176 if (!pci_probe_only)
929 return; 1177 return;
930 1178
931 list_for_each_entry(dev, &bus->devices, bus_list) { 1179 list_for_each_entry(dev, &bus->devices, bus_list)
932 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) 1180 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
933 pcibios_fixup_device_resources(dev, bus); 1181 pcibios_fixup_device_resources(dev, bus);
934 }
935} 1182}
936EXPORT_SYMBOL(pcibios_fixup_bus); 1183EXPORT_SYMBOL(pcibios_fixup_bus);
937 1184
@@ -983,3 +1230,62 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
983} 1230}
984 1231
985#endif /* CONFIG_PPC_MULTIPLATFORM */ 1232#endif /* CONFIG_PPC_MULTIPLATFORM */
1233
1234
1235#define IOBASE_BRIDGE_NUMBER 0
1236#define IOBASE_MEMORY 1
1237#define IOBASE_IO 2
1238#define IOBASE_ISA_IO 3
1239#define IOBASE_ISA_MEM 4
1240
1241long sys_pciconfig_iobase(long which, unsigned long in_bus,
1242 unsigned long in_devfn)
1243{
1244 struct pci_controller* hose;
1245 struct list_head *ln;
1246 struct pci_bus *bus = NULL;
1247 struct device_node *hose_node;
1248
1249 /* Argh ! Please forgive me for that hack, but that's the
1250 * simplest way to get existing XFree to not lockup on some
1251 * G5 machines... So when something asks for bus 0 io base
1252 * (bus 0 is HT root), we return the AGP one instead.
1253 */
1254#ifdef CONFIG_PPC_PMAC
1255 if (systemcfg->platform == PLATFORM_POWERMAC &&
1256 machine_is_compatible("MacRISC4"))
1257 if (in_bus == 0)
1258 in_bus = 0xf0;
1259#endif /* CONFIG_PPC_PMAC */
1260
1261 /* That syscall isn't quite compatible with PCI domains, but it's
1262 * used on pre-domains setup. We return the first match
1263 */
1264
1265 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
1266 bus = pci_bus_b(ln);
1267 if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
1268 break;
1269 bus = NULL;
1270 }
1271 if (bus == NULL || bus->sysdata == NULL)
1272 return -ENODEV;
1273
1274 hose_node = (struct device_node *)bus->sysdata;
1275 hose = PCI_DN(hose_node)->phb;
1276
1277 switch (which) {
1278 case IOBASE_BRIDGE_NUMBER:
1279 return (long)hose->first_busno;
1280 case IOBASE_MEMORY:
1281 return (long)hose->pci_mem_offset;
1282 case IOBASE_IO:
1283 return (long)hose->io_base_phys;
1284 case IOBASE_ISA_IO:
1285 return (long)isa_io_base;
1286 case IOBASE_ISA_MEM:
1287 return -EINVAL;
1288 }
1289
1290 return -EOPNOTSUPP;
1291}
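sys_pciconfig_iobase() above is the kernel side of the pciconfig_iobase syscall that user space (historically XFree86) uses to learn the ISA I/O base and host-bridge memory offset for a bus. A hedged userspace sketch of a caller, assuming the powerpc libc headers define __NR_pciconfig_iobase; on other architectures it only prints a notice.

/* Sketch only: query the pciconfig_iobase syscall from user space. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOBASE_MEMORY 1
#define IOBASE_ISA_IO 3

int main(void)
{
#ifdef __NR_pciconfig_iobase
        long mem_off = syscall(__NR_pciconfig_iobase, IOBASE_MEMORY, 0UL, 0UL);
        long isa_io  = syscall(__NR_pciconfig_iobase, IOBASE_ISA_IO, 0UL, 0UL);

        printf("bus 0: pci_mem_offset=%#lx isa_io_base=%#lx\n", mem_off, isa_io);
#else
        puts("pciconfig_iobase is only wired up on powerpc kernels");
#endif
        return 0;
}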
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
index 26be78b13af1..5eb2cc320566 100644
--- a/arch/ppc64/kernel/pci.h
+++ b/arch/ppc64/kernel/pci.h
@@ -34,7 +34,6 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
34 34
35void pci_devs_phb_init(void); 35void pci_devs_phb_init(void);
36void pci_devs_phb_init_dynamic(struct pci_controller *phb); 36void pci_devs_phb_init_dynamic(struct pci_controller *phb);
37struct device_node *fetch_dev_dn(struct pci_dev *dev);
38 37
39/* PCI address cache management routines */ 38/* PCI address cache management routines */
40void pci_addr_cache_insert_device(struct pci_dev *dev); 39void pci_addr_cache_insert_device(struct pci_dev *dev);
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index ec345462afc3..a86389d07d57 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -23,6 +23,8 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/bootmem.h>
26 28
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/prom.h> 30#include <asm/prom.h>
@@ -40,16 +42,26 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
40 struct pci_controller *phb = data; 42 struct pci_controller *phb = data;
41 int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL); 43 int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL);
42 u32 *regs; 44 u32 *regs;
43 45 struct pci_dn *pdn;
44 dn->phb = phb; 46
47 if (phb->is_dynamic)
48 pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
49 else
50 pdn = alloc_bootmem(sizeof(*pdn));
51 if (pdn == NULL)
52 return NULL;
53 memset(pdn, 0, sizeof(*pdn));
54 dn->data = pdn;
55 pdn->node = dn;
56 pdn->phb = phb;
45 regs = (u32 *)get_property(dn, "reg", NULL); 57 regs = (u32 *)get_property(dn, "reg", NULL);
46 if (regs) { 58 if (regs) {
47 /* First register entry is addr (00BBSS00) */ 59 /* First register entry is addr (00BBSS00) */
48 dn->busno = (regs[0] >> 16) & 0xff; 60 pdn->busno = (regs[0] >> 16) & 0xff;
49 dn->devfn = (regs[0] >> 8) & 0xff; 61 pdn->devfn = (regs[0] >> 8) & 0xff;
50 } 62 }
51 63
52 dn->pci_ext_config_space = (type && *type == 1); 64 pdn->pci_ext_config_space = (type && *type == 1);
53 return NULL; 65 return NULL;
54} 66}
55 67
@@ -112,10 +124,15 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
112void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb) 124void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
113{ 125{
114 struct device_node * dn = (struct device_node *) phb->arch_data; 126 struct device_node * dn = (struct device_node *) phb->arch_data;
127 struct pci_dn *pdn;
115 128
116 /* PHB nodes themselves must not match */ 129 /* PHB nodes themselves must not match */
117 dn->devfn = dn->busno = -1; 130 update_dn_pci_info(dn, phb);
118 dn->phb = phb; 131 pdn = dn->data;
132 if (pdn) {
133 pdn->devfn = pdn->busno = -1;
134 pdn->phb = phb;
135 }
119 136
120 /* Update dn->phb ptrs for new phb and children devices */ 137 /* Update dn->phb ptrs for new phb and children devices */
121 traverse_pci_devices(dn, update_dn_pci_info, phb); 138 traverse_pci_devices(dn, update_dn_pci_info, phb);
@@ -123,14 +140,17 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
123 140
124/* 141/*
125 * Traversal func that looks for a <busno,devfcn> value. 142 * Traversal func that looks for a <busno,devfcn> value.
126 * If found, the device_node is returned (thus terminating the traversal). 143 * If found, the pci_dn is returned (thus terminating the traversal).
127 */ 144 */
128static void *is_devfn_node(struct device_node *dn, void *data) 145static void *is_devfn_node(struct device_node *dn, void *data)
129{ 146{
130 int busno = ((unsigned long)data >> 8) & 0xff; 147 int busno = ((unsigned long)data >> 8) & 0xff;
131 int devfn = ((unsigned long)data) & 0xff; 148 int devfn = ((unsigned long)data) & 0xff;
149 struct pci_dn *pci = dn->data;
132 150
133 return ((devfn == dn->devfn) && (busno == dn->busno)) ? dn : NULL; 151 if (pci && (devfn == pci->devfn) && (busno == pci->busno))
152 return dn;
153 return NULL;
134} 154}
135 155
136/* 156/*
@@ -149,13 +169,10 @@ static void *is_devfn_node(struct device_node *dn, void *data)
149struct device_node *fetch_dev_dn(struct pci_dev *dev) 169struct device_node *fetch_dev_dn(struct pci_dev *dev)
150{ 170{
151 struct device_node *orig_dn = dev->sysdata; 171 struct device_node *orig_dn = dev->sysdata;
152 struct pci_controller *phb = orig_dn->phb; /* assume same phb as orig_dn */
153 struct device_node *phb_dn;
154 struct device_node *dn; 172 struct device_node *dn;
155 unsigned long searchval = (dev->bus->number << 8) | dev->devfn; 173 unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
156 174
157 phb_dn = phb->arch_data; 175 dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval);
158 dn = traverse_pci_devices(phb_dn, is_devfn_node, (void *)searchval);
159 if (dn) 176 if (dn)
160 dev->sysdata = dn; 177 dev->sysdata = dn;
161 return dn; 178 return dn;
@@ -165,11 +182,13 @@ EXPORT_SYMBOL(fetch_dev_dn);
165static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) 182static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
166{ 183{
167 struct device_node *np = node; 184 struct device_node *np = node;
185 struct pci_dn *pci;
168 int err = NOTIFY_OK; 186 int err = NOTIFY_OK;
169 187
170 switch (action) { 188 switch (action) {
171 case PSERIES_RECONFIG_ADD: 189 case PSERIES_RECONFIG_ADD:
172 update_dn_pci_info(np, np->parent->phb); 190 pci = np->parent->data;
191 update_dn_pci_info(np, pci->phb);
173 break; 192 break;
174 default: 193 default:
175 err = NOTIFY_DONE; 194 err = NOTIFY_DONE;
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index ef0a62b916be..14647e09c9cd 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -66,7 +66,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
66#endif /* CONFIG_PPC_ISERIES */ 66#endif /* CONFIG_PPC_ISERIES */
67 67
68#ifdef CONFIG_PPC_MULTIPLATFORM 68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_GET_DN(pdev)->iommu_table; 69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */ 70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 71}
72 72
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
index 98ed2bccab1a..eb4e6c3f694d 100644
--- a/arch/ppc64/kernel/pmac_feature.c
+++ b/arch/ppc64/kernel/pmac_feature.c
@@ -674,6 +674,7 @@ void __init pmac_check_ht_link(void)
674#if 0 /* Disabled for now */ 674#if 0 /* Disabled for now */
675 u32 ufreq, freq, ucfg, cfg; 675 u32 ufreq, freq, ucfg, cfg;
676 struct device_node *pcix_node; 676 struct device_node *pcix_node;
677 struct pci_dn *pdn;
677 u8 px_bus, px_devfn; 678 u8 px_bus, px_devfn;
678 struct pci_controller *px_hose; 679 struct pci_controller *px_hose;
679 680
@@ -687,9 +688,10 @@ void __init pmac_check_ht_link(void)
687 printk("No PCI-X bridge found\n"); 688 printk("No PCI-X bridge found\n");
688 return; 689 return;
689 } 690 }
690 px_hose = pcix_node->phb; 691 pdn = pcix_node->data;
691 px_bus = pcix_node->busno; 692 px_hose = pdn->phb;
692 px_devfn = pcix_node->devfn; 693 px_bus = pdn->busno;
694 px_devfn = pdn->devfn;
693 695
694 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg); 696 early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
695 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq); 697 early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
index 71fe911ad183..dc40a0cad0b4 100644
--- a/arch/ppc64/kernel/pmac_pci.c
+++ b/arch/ppc64/kernel/pmac_pci.c
@@ -242,7 +242,7 @@ static int u3_ht_skip_device(struct pci_controller *hose,
242 else 242 else
243 busdn = hose->arch_data; 243 busdn = hose->arch_data;
244 for (dn = busdn->child; dn; dn = dn->sibling) 244 for (dn = busdn->child; dn; dn = dn->sibling)
245 if (dn->devfn == devfn) 245 if (dn->data && PCI_DN(dn)->devfn == devfn)
246 break; 246 break;
247 if (dn == NULL) 247 if (dn == NULL)
248 return -1; 248 return -1;
@@ -388,7 +388,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
388 * the reg address cell, we shall fix that by killing struct 388 * the reg address cell, we shall fix that by killing struct
389 * reg_property and using some accessor functions instead 389 * reg_property and using some accessor functions instead
390 */ 390 */
391 hose->first_busno = 0xf0; 391 hose->first_busno = 0xf0;
392 hose->last_busno = 0xff; 392 hose->last_busno = 0xff;
393 has_uninorth = 1; 393 has_uninorth = 1;
394 hose->ops = &macrisc_pci_ops; 394 hose->ops = &macrisc_pci_ops;
@@ -473,7 +473,7 @@ static void __init setup_u3_ht(struct pci_controller* hose)
473 continue; 473 continue;
474 } 474 }
475 cur++; 475 cur++;
476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n", 476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
477 cur-1, res->start - 1, cur, res->end + 1); 477 cur-1, res->start - 1, cur, res->end + 1);
478 hose->mem_resources[cur].name = np->full_name; 478 hose->mem_resources[cur].name = np->full_name;
479 hose->mem_resources[cur].flags = IORESOURCE_MEM; 479 hose->mem_resources[cur].flags = IORESOURCE_MEM;
@@ -603,24 +603,24 @@ static int __init add_bridge(struct device_node *dev)
603 char* disp_name; 603 char* disp_name;
604 int *bus_range; 604 int *bus_range;
605 int primary = 1; 605 int primary = 1;
606 struct property *of_prop; 606 struct property *of_prop;
607 607
608 DBG("Adding PCI host bridge %s\n", dev->full_name); 608 DBG("Adding PCI host bridge %s\n", dev->full_name);
609 609
610 bus_range = (int *) get_property(dev, "bus-range", &len); 610 bus_range = (int *) get_property(dev, "bus-range", &len);
611 if (bus_range == NULL || len < 2 * sizeof(int)) { 611 if (bus_range == NULL || len < 2 * sizeof(int)) {
612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n", 612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
613 dev->full_name); 613 dev->full_name);
614 } 614 }
615 615
616 hose = alloc_bootmem(sizeof(struct pci_controller)); 616 hose = alloc_bootmem(sizeof(struct pci_controller));
617 if (hose == NULL) 617 if (hose == NULL)
618 return -ENOMEM; 618 return -ENOMEM;
619 pci_setup_pci_controller(hose); 619 pci_setup_pci_controller(hose);
620 620
621 hose->arch_data = dev; 621 hose->arch_data = dev;
622 hose->first_busno = bus_range ? bus_range[0] : 0; 622 hose->first_busno = bus_range ? bus_range[0] : 0;
623 hose->last_busno = bus_range ? bus_range[1] : 0xff; 623 hose->last_busno = bus_range ? bus_range[1] : 0xff;
624 624
625 of_prop = alloc_bootmem(sizeof(struct property) + 625 of_prop = alloc_bootmem(sizeof(struct property) +
626 sizeof(hose->global_number)); 626 sizeof(hose->global_number));
@@ -634,24 +634,24 @@ static int __init add_bridge(struct device_node *dev)
634 } 634 }
635 635
636 disp_name = NULL; 636 disp_name = NULL;
637 if (device_is_compatible(dev, "u3-agp")) { 637 if (device_is_compatible(dev, "u3-agp")) {
638 setup_u3_agp(hose); 638 setup_u3_agp(hose);
639 disp_name = "U3-AGP"; 639 disp_name = "U3-AGP";
640 primary = 0; 640 primary = 0;
641 } else if (device_is_compatible(dev, "u3-ht")) { 641 } else if (device_is_compatible(dev, "u3-ht")) {
642 setup_u3_ht(hose); 642 setup_u3_ht(hose);
643 disp_name = "U3-HT"; 643 disp_name = "U3-HT";
644 primary = 1; 644 primary = 1;
645 } 645 }
646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n", 646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
647 disp_name, hose->first_busno, hose->last_busno); 647 disp_name, hose->first_busno, hose->last_busno);
648 648
649 /* Interpret the "ranges" property */ 649 /* Interpret the "ranges" property */
650 /* This also maps the I/O region and sets isa_io/mem_base */ 650 /* This also maps the I/O region and sets isa_io/mem_base */
651 pmac_process_bridge_OF_ranges(hose, dev, primary); 651 pmac_process_bridge_OF_ranges(hose, dev, primary);
652 652
653 /* Fixup "bus-range" OF property */ 653 /* Fixup "bus-range" OF property */
654 fixup_bus_range(dev); 654 fixup_bus_range(dev);
655 655
656 return 0; 656 return 0;
657} 657}
@@ -746,9 +746,9 @@ void __init pmac_pci_init(void)
746 */ 746 */
747 if (u3_agp) { 747 if (u3_agp) {
748 struct device_node *np = u3_agp->arch_data; 748 struct device_node *np = u3_agp->arch_data;
749 np->busno = 0xf0; 749 PCI_DN(np)->busno = 0xf0;
750 for (np = np->child; np; np = np->sibling) 750 for (np = np->child; np; np = np->sibling)
751 np->busno = 0xf0; 751 PCI_DN(np)->busno = 0xf0;
752 } 752 }
753 753
754 pmac_check_ht_link(); 754 pmac_check_ht_link();
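 
Both hunks in this file switch from direct device_node fields to the PCI_DN() accessor. As a hedged sketch, PCI_DN() is assumed to be nothing more than a cast of the node's private data, so the bus-number fixup becomes a walk over the children that actually carry a pci_dn:

/* Assumed accessor (expected in asm-ppc64/pci-bridge.h for this series):
 *	#define PCI_DN(dn)	((struct pci_dn *)((dn)->data))
 */
static void __init stamp_agp_busno(struct pci_controller *hose, u8 busno)
{
	struct device_node *np = hose->arch_data;

	if (np->data)
		PCI_DN(np)->busno = busno;
	for (np = np->child; np; np = np->sibling)
		if (np->data)			/* skip nodes without a pci_dn */
			PCI_DN(np)->busno = busno;
}
 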
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index e40877fa67cd..325426c7bed0 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -71,6 +71,7 @@
71#include <asm/of_device.h> 71#include <asm/of_device.h>
72#include <asm/lmb.h> 72#include <asm/lmb.h>
73#include <asm/smu.h> 73#include <asm/smu.h>
74#include <asm/pmc.h>
74 75
75#include "pmac.h" 76#include "pmac.h"
76#include "mpic.h" 77#include "mpic.h"
@@ -273,16 +274,6 @@ static void __pmac pmac_halt(void)
273} 274}
274 275
275#ifdef CONFIG_BOOTX_TEXT 276#ifdef CONFIG_BOOTX_TEXT
276static int dummy_getc_poll(void)
277{
278 return -1;
279}
280
281static unsigned char dummy_getc(void)
282{
283 return 0;
284}
285
286static void btext_putc(unsigned char c) 277static void btext_putc(unsigned char c)
287{ 278{
288 btext_drawchar(c); 279 btext_drawchar(c);
@@ -341,16 +332,13 @@ static void __init pmac_init_early(void)
341 sccdbg = 1; 332 sccdbg = 1;
342 udbg_init_scc(NULL); 333 udbg_init_scc(NULL);
343 } 334 }
344
345 else {
346#ifdef CONFIG_BOOTX_TEXT 335#ifdef CONFIG_BOOTX_TEXT
336 else {
347 init_boot_display(); 337 init_boot_display();
348 338
349 ppc_md.udbg_putc = btext_putc; 339 udbg_putc = btext_putc;
350 ppc_md.udbg_getc = dummy_getc;
351 ppc_md.udbg_getc_poll = dummy_getc_poll;
352#endif /* CONFIG_BOOTX_TEXT */
353 } 340 }
341#endif /* CONFIG_BOOTX_TEXT */
354 342
355 /* Setup interrupt mapping options */ 343 /* Setup interrupt mapping options */
356 ppc64_interrupt_controller = IC_OPEN_PIC; 344 ppc64_interrupt_controller = IC_OPEN_PIC;
@@ -489,6 +477,18 @@ static int __init pmac_probe(int platform)
489 return 1; 477 return 1;
490} 478}
491 479
480static int pmac_probe_mode(struct pci_bus *bus)
481{
482 struct device_node *node = bus->sysdata;
483
484 /* We need to use normal PCI probing for the AGP bus,
485 since the device for the AGP bridge isn't in the tree. */
486 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
487 return PCI_PROBE_NORMAL;
488
489 return PCI_PROBE_DEVTREE;
490}
491
492struct machdep_calls __initdata pmac_md = { 492struct machdep_calls __initdata pmac_md = {
493#ifdef CONFIG_HOTPLUG_CPU 493#ifdef CONFIG_HOTPLUG_CPU
494 .cpu_die = generic_mach_cpu_die, 494 .cpu_die = generic_mach_cpu_die,
@@ -500,6 +500,7 @@ struct machdep_calls __initdata pmac_md = {
500 .init_IRQ = pmac_init_IRQ, 500 .init_IRQ = pmac_init_IRQ,
501 .get_irq = mpic_get_irq, 501 .get_irq = mpic_get_irq,
502 .pcibios_fixup = pmac_pcibios_fixup, 502 .pcibios_fixup = pmac_pcibios_fixup,
503 .pci_probe_mode = pmac_probe_mode,
503 .restart = pmac_restart, 504 .restart = pmac_restart,
504 .power_off = pmac_power_off, 505 .power_off = pmac_power_off,
505 .halt = pmac_halt, 506 .halt = pmac_halt,
@@ -511,4 +512,5 @@ struct machdep_calls __initdata pmac_md = {
511 .progress = pmac_progress, 512 .progress = pmac_progress,
512 .check_legacy_ioport = pmac_check_legacy_ioport, 513 .check_legacy_ioport = pmac_check_legacy_ioport,
513 .idle_loop = native_idle, 514 .idle_loop = native_idle,
515 .enable_pmcs = power4_enable_pmcs,
514}; 516};
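 
pmac_probe_mode() plugs into the new ppc_md.pci_probe_mode hook added to pmac_md above. The generic consumer is not part of this diff; a plausible caller, with PCI_PROBE_NORMAL assumed as the fallback when a platform supplies no hook, might look roughly like this:

/* Hedged sketch of the generic side of the new hook; names other than
 * ppc_md.pci_probe_mode and the PCI_PROBE_* constants are assumptions. */
static int pci_probe_mode(struct pci_bus *bus)
{
	if (ppc_md.pci_probe_mode)
		return ppc_md.pci_probe_mode(bus);	/* pmac_probe_mode() here */
	return PCI_PROBE_NORMAL;			/* assumed default */
}
 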
diff --git a/arch/ppc64/kernel/pmc.c b/arch/ppc64/kernel/pmc.c
index 67be773f9c00..63d9481c3ec2 100644
--- a/arch/ppc64/kernel/pmc.c
+++ b/arch/ppc64/kernel/pmc.c
@@ -26,7 +26,7 @@ static void dummy_perf(struct pt_regs *regs)
26 mtspr(SPRN_MMCR0, mmcr0); 26 mtspr(SPRN_MMCR0, mmcr0);
27} 27}
28 28
29static spinlock_t pmc_owner_lock = SPIN_LOCK_UNLOCKED; 29static DEFINE_SPINLOCK(pmc_owner_lock);
30static void *pmc_owner_caller; /* mostly for debugging */ 30static void *pmc_owner_caller; /* mostly for debugging */
31perf_irq_t perf_irq = dummy_perf; 31perf_irq_t perf_irq = dummy_perf;
32 32
@@ -65,3 +65,24 @@ void release_pmc_hardware(void)
65 spin_unlock(&pmc_owner_lock); 65 spin_unlock(&pmc_owner_lock);
66} 66}
67EXPORT_SYMBOL_GPL(release_pmc_hardware); 67EXPORT_SYMBOL_GPL(release_pmc_hardware);
68
69void power4_enable_pmcs(void)
70{
71 unsigned long hid0;
72
73 hid0 = mfspr(HID0);
74 hid0 |= 1UL << (63 - 20);
75
76 /* POWER4 requires the following sequence */
77 asm volatile(
78 "sync\n"
79 "mtspr %1, %0\n"
80 "mfspr %0, %1\n"
81 "mfspr %0, %1\n"
82 "mfspr %0, %1\n"
83 "mfspr %0, %1\n"
84 "mfspr %0, %1\n"
85 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
87 "memory");
88}
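 
power4_enable_pmcs() is reached through the new ppc_md.enable_pmcs pointer wired up in pmac_md above. The HID0 mask it sets uses IBM (MSB-0) bit numbering, so "bit 20" of the 64-bit register is 1UL << (63 - 20). A standalone check of that arithmetic:

/* Userspace check of the mask computed in power4_enable_pmcs(). */
#include <stdio.h>

int main(void)
{
	unsigned long mask = 1UL << (63 - 20);	/* HID0 bit 20, IBM numbering */

	printf("0x%016lx\n", mask);		/* prints 0x0000080000000000 */
	return 0;
}
 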
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c
index f7cae05e40fb..887005358eb1 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/ppc64/kernel/process.c
@@ -50,9 +50,11 @@
50#include <asm/machdep.h> 50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h> 51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h> 52#include <asm/cputable.h>
53#include <asm/firmware.h>
53#include <asm/sections.h> 54#include <asm/sections.h>
54#include <asm/tlbflush.h> 55#include <asm/tlbflush.h>
55#include <asm/time.h> 56#include <asm/time.h>
57#include <asm/plpar_wrappers.h>
56 58
57#ifndef CONFIG_SMP 59#ifndef CONFIG_SMP
58struct task_struct *last_task_used_math = NULL; 60struct task_struct *last_task_used_math = NULL;
@@ -162,7 +164,30 @@ int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
162 164
163#endif /* CONFIG_ALTIVEC */ 165#endif /* CONFIG_ALTIVEC */
164 166
167static void set_dabr_spr(unsigned long val)
168{
169 mtspr(SPRN_DABR, val);
170}
171
172int set_dabr(unsigned long dabr)
173{
174 int ret = 0;
175
176 if (firmware_has_feature(FW_FEATURE_XDABR)) {
177 /* We want to catch accesses from kernel and userspace */
178 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
179 ret = plpar_set_xdabr(dabr, flags);
180 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
181 ret = plpar_set_dabr(dabr);
182 } else {
183 set_dabr_spr(dabr);
184 }
185
186 return ret;
187}
188
165DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 189DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
190static DEFINE_PER_CPU(unsigned long, current_dabr);
166 191
167struct task_struct *__switch_to(struct task_struct *prev, 192struct task_struct *__switch_to(struct task_struct *prev,
168 struct task_struct *new) 193 struct task_struct *new)
@@ -197,16 +222,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
197 new->thread.regs->msr |= MSR_VEC; 222 new->thread.regs->msr |= MSR_VEC;
198#endif /* CONFIG_ALTIVEC */ 223#endif /* CONFIG_ALTIVEC */
199 224
225 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
226 set_dabr(new->thread.dabr);
227 __get_cpu_var(current_dabr) = new->thread.dabr;
228 }
229
200 flush_tlb_pending(); 230 flush_tlb_pending();
201 231
202 new_thread = &new->thread; 232 new_thread = &new->thread;
203 old_thread = &current->thread; 233 old_thread = &current->thread;
204 234
205/* Collect purr utilization data per process and per processor wise */ 235 /* Collect purr utilization data per process and per processor
206/* purr is nothing but processor time base */ 236 * wise purr is nothing but processor time base
207 237 */
208#if defined(CONFIG_PPC_PSERIES) 238 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
209 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
210 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 239 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
211 long unsigned start_tb, current_tb; 240 long unsigned start_tb, current_tb;
212 start_tb = old_thread->start_tb; 241 start_tb = old_thread->start_tb;
@@ -214,8 +243,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
214 old_thread->accum_tb += (current_tb - start_tb); 243 old_thread->accum_tb += (current_tb - start_tb);
215 new_thread->start_tb = current_tb; 244 new_thread->start_tb = current_tb;
216 } 245 }
217#endif
218
219 246
220 local_irq_save(flags); 247 local_irq_save(flags);
221 last = _switch(old_thread, new_thread); 248 last = _switch(old_thread, new_thread);
@@ -336,6 +363,11 @@ void flush_thread(void)
336 last_task_used_altivec = NULL; 363 last_task_used_altivec = NULL;
337#endif /* CONFIG_ALTIVEC */ 364#endif /* CONFIG_ALTIVEC */
338#endif /* CONFIG_SMP */ 365#endif /* CONFIG_SMP */
366
367 if (current->thread.dabr) {
368 current->thread.dabr = 0;
369 set_dabr(0);
370 }
339} 371}
340 372
341void 373void
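 
set_dabr() above picks the hypervisor call (XDABR or plain DABR) when the firmware advertises one and falls back to writing SPRN_DABR directly, while __switch_to() caches the last value per CPU so the possibly slow call only happens when the incoming thread's watchpoint differs. ptrace_set_debugreg(), called from the ptrace hunks further down, is not in this diff; a minimal version consistent with that logic might be:

/* Hedged sketch only; the real helper's address and validity checks
 * are omitted.  It records the watchpoint in the thread struct so
 * __switch_to() reloads it, and programs it immediately for the
 * current task. */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			unsigned long data)
{
	if (addr > 0)			/* only DABR number 0 exists */
		return -EINVAL;

	task->thread.dabr = data;
	if (task == current)
		set_dabr(data);
	return 0;
}
 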
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 5aca01ddd81f..7035deb6de92 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/version.h>
26#include <linux/threads.h> 25#include <linux/threads.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <linux/types.h> 27#include <linux/types.h>
@@ -625,8 +624,8 @@ void __init finish_device_tree(void)
625 624
626static inline char *find_flat_dt_string(u32 offset) 625static inline char *find_flat_dt_string(u32 offset)
627{ 626{
628 return ((char *)initial_boot_params) + initial_boot_params->off_dt_strings 627 return ((char *)initial_boot_params) +
629 + offset; 628 initial_boot_params->off_dt_strings + offset;
630} 629}
631 630
632/** 631/**
@@ -635,26 +634,33 @@ static inline char *find_flat_dt_string(u32 offset)
635 * unflatten the tree 634 * unflatten the tree
636 */ 635 */
637static int __init scan_flat_dt(int (*it)(unsigned long node, 636static int __init scan_flat_dt(int (*it)(unsigned long node,
638 const char *full_path, void *data), 637 const char *uname, int depth,
638 void *data),
639 void *data) 639 void *data)
640{ 640{
641 unsigned long p = ((unsigned long)initial_boot_params) + 641 unsigned long p = ((unsigned long)initial_boot_params) +
642 initial_boot_params->off_dt_struct; 642 initial_boot_params->off_dt_struct;
643 int rc = 0; 643 int rc = 0;
644 int depth = -1;
644 645
645 do { 646 do {
646 u32 tag = *((u32 *)p); 647 u32 tag = *((u32 *)p);
647 char *pathp; 648 char *pathp;
648 649
649 p += 4; 650 p += 4;
650 if (tag == OF_DT_END_NODE) 651 if (tag == OF_DT_END_NODE) {
652 depth --;
653 continue;
654 }
655 if (tag == OF_DT_NOP)
651 continue; 656 continue;
652 if (tag == OF_DT_END) 657 if (tag == OF_DT_END)
653 break; 658 break;
654 if (tag == OF_DT_PROP) { 659 if (tag == OF_DT_PROP) {
655 u32 sz = *((u32 *)p); 660 u32 sz = *((u32 *)p);
656 p += 8; 661 p += 8;
657 p = _ALIGN(p, sz >= 8 ? 8 : 4); 662 if (initial_boot_params->version < 0x10)
663 p = _ALIGN(p, sz >= 8 ? 8 : 4);
658 p += sz; 664 p += sz;
659 p = _ALIGN(p, 4); 665 p = _ALIGN(p, 4);
660 continue; 666 continue;
@@ -664,9 +670,18 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
664 " device tree !\n", tag); 670 " device tree !\n", tag);
665 return -EINVAL; 671 return -EINVAL;
666 } 672 }
673 depth++;
667 pathp = (char *)p; 674 pathp = (char *)p;
668 p = _ALIGN(p + strlen(pathp) + 1, 4); 675 p = _ALIGN(p + strlen(pathp) + 1, 4);
669 rc = it(p, pathp, data); 676 if ((*pathp) == '/') {
677 char *lp, *np;
678 for (lp = NULL, np = pathp; *np; np++)
679 if ((*np) == '/')
680 lp = np+1;
681 if (lp != NULL)
682 pathp = lp;
683 }
684 rc = it(p, pathp, depth, data);
670 if (rc != 0) 685 if (rc != 0)
671 break; 686 break;
672 } while(1); 687 } while(1);
@@ -689,17 +704,21 @@ static void* __init get_flat_dt_prop(unsigned long node, const char *name,
689 const char *nstr; 704 const char *nstr;
690 705
691 p += 4; 706 p += 4;
707 if (tag == OF_DT_NOP)
708 continue;
692 if (tag != OF_DT_PROP) 709 if (tag != OF_DT_PROP)
693 return NULL; 710 return NULL;
694 711
695 sz = *((u32 *)p); 712 sz = *((u32 *)p);
696 noff = *((u32 *)(p + 4)); 713 noff = *((u32 *)(p + 4));
697 p += 8; 714 p += 8;
698 p = _ALIGN(p, sz >= 8 ? 8 : 4); 715 if (initial_boot_params->version < 0x10)
716 p = _ALIGN(p, sz >= 8 ? 8 : 4);
699 717
700 nstr = find_flat_dt_string(noff); 718 nstr = find_flat_dt_string(noff);
701 if (nstr == NULL) { 719 if (nstr == NULL) {
702 printk(KERN_WARNING "Can't find property index name !\n"); 720 printk(KERN_WARNING "Can't find property index"
721 " name !\n");
703 return NULL; 722 return NULL;
704 } 723 }
705 if (strcmp(name, nstr) == 0) { 724 if (strcmp(name, nstr) == 0) {
@@ -713,7 +732,7 @@ static void* __init get_flat_dt_prop(unsigned long node, const char *name,
713} 732}
714 733
715static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, 734static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
716 unsigned long align) 735 unsigned long align)
717{ 736{
718 void *res; 737 void *res;
719 738
@@ -727,13 +746,16 @@ static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
727static unsigned long __init unflatten_dt_node(unsigned long mem, 746static unsigned long __init unflatten_dt_node(unsigned long mem,
728 unsigned long *p, 747 unsigned long *p,
729 struct device_node *dad, 748 struct device_node *dad,
730 struct device_node ***allnextpp) 749 struct device_node ***allnextpp,
750 unsigned long fpsize)
731{ 751{
732 struct device_node *np; 752 struct device_node *np;
733 struct property *pp, **prev_pp = NULL; 753 struct property *pp, **prev_pp = NULL;
734 char *pathp; 754 char *pathp;
735 u32 tag; 755 u32 tag;
736 unsigned int l; 756 unsigned int l, allocl;
757 int has_name = 0;
758 int new_format = 0;
737 759
738 tag = *((u32 *)(*p)); 760 tag = *((u32 *)(*p));
739 if (tag != OF_DT_BEGIN_NODE) { 761 if (tag != OF_DT_BEGIN_NODE) {
@@ -742,21 +764,62 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
742 } 764 }
743 *p += 4; 765 *p += 4;
744 pathp = (char *)*p; 766 pathp = (char *)*p;
745 l = strlen(pathp) + 1; 767 l = allocl = strlen(pathp) + 1;
746 *p = _ALIGN(*p + l, 4); 768 *p = _ALIGN(*p + l, 4);
747 769
748 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + l, 770 /* version 0x10 has a more compact unit name here instead of the full
771 * path. we accumulate the full path size using "fpsize", we'll rebuild
772 * it later. We detect this because the first character of the name is
773 * not '/'.
774 */
775 if ((*pathp) != '/') {
776 new_format = 1;
777 if (fpsize == 0) {
778 /* root node: special case. fpsize accounts for path
779 * plus terminating zero. root node only has '/', so
780 * fpsize should be 2, but we want to avoid the first
781 * level nodes to have two '/' so we use fpsize 1 here
782 */
783 fpsize = 1;
784 allocl = 2;
785 } else {
786 /* account for '/' and path size minus terminal 0
787 * already in 'l'
788 */
789 fpsize += l;
790 allocl = fpsize;
791 }
792 }
793
794
795 np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
749 __alignof__(struct device_node)); 796 __alignof__(struct device_node));
750 if (allnextpp) { 797 if (allnextpp) {
751 memset(np, 0, sizeof(*np)); 798 memset(np, 0, sizeof(*np));
752 np->full_name = ((char*)np) + sizeof(struct device_node); 799 np->full_name = ((char*)np) + sizeof(struct device_node);
753 memcpy(np->full_name, pathp, l); 800 if (new_format) {
801 char *p = np->full_name;
802 /* rebuild full path for new format */
803 if (dad && dad->parent) {
804 strcpy(p, dad->full_name);
805#ifdef DEBUG
806 if ((strlen(p) + l + 1) != allocl) {
807 DBG("%s: p: %d, l: %d, a: %d\n",
808 pathp, strlen(p), l, allocl);
809 }
810#endif
811 p += strlen(p);
812 }
813 *(p++) = '/';
814 memcpy(p, pathp, l);
815 } else
816 memcpy(np->full_name, pathp, l);
754 prev_pp = &np->properties; 817 prev_pp = &np->properties;
755 **allnextpp = np; 818 **allnextpp = np;
756 *allnextpp = &np->allnext; 819 *allnextpp = &np->allnext;
757 if (dad != NULL) { 820 if (dad != NULL) {
758 np->parent = dad; 821 np->parent = dad;
759 /* we temporarily use the `next' field as `last_child'. */ 822 /* we temporarily use the next field as `last_child'*/
760 if (dad->next == 0) 823 if (dad->next == 0)
761 dad->child = np; 824 dad->child = np;
762 else 825 else
@@ -770,18 +833,26 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
770 char *pname; 833 char *pname;
771 834
772 tag = *((u32 *)(*p)); 835 tag = *((u32 *)(*p));
836 if (tag == OF_DT_NOP) {
837 *p += 4;
838 continue;
839 }
773 if (tag != OF_DT_PROP) 840 if (tag != OF_DT_PROP)
774 break; 841 break;
775 *p += 4; 842 *p += 4;
776 sz = *((u32 *)(*p)); 843 sz = *((u32 *)(*p));
777 noff = *((u32 *)((*p) + 4)); 844 noff = *((u32 *)((*p) + 4));
778 *p = _ALIGN((*p) + 8, sz >= 8 ? 8 : 4); 845 *p += 8;
846 if (initial_boot_params->version < 0x10)
847 *p = _ALIGN(*p, sz >= 8 ? 8 : 4);
779 848
780 pname = find_flat_dt_string(noff); 849 pname = find_flat_dt_string(noff);
781 if (pname == NULL) { 850 if (pname == NULL) {
782 printk("Can't find property name in list !\n"); 851 printk("Can't find property name in list !\n");
783 break; 852 break;
784 } 853 }
854 if (strcmp(pname, "name") == 0)
855 has_name = 1;
785 l = strlen(pname) + 1; 856 l = strlen(pname) + 1;
786 pp = unflatten_dt_alloc(&mem, sizeof(struct property), 857 pp = unflatten_dt_alloc(&mem, sizeof(struct property),
787 __alignof__(struct property)); 858 __alignof__(struct property));
@@ -801,6 +872,36 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
801 } 872 }
802 *p = _ALIGN((*p) + sz, 4); 873 *p = _ALIGN((*p) + sz, 4);
803 } 874 }
875 /* with version 0x10 we may not have the name property, recreate
876 * it here from the unit name if absent
877 */
878 if (!has_name) {
879 char *p = pathp, *ps = pathp, *pa = NULL;
880 int sz;
881
882 while (*p) {
883 if ((*p) == '@')
884 pa = p;
885 if ((*p) == '/')
886 ps = p + 1;
887 p++;
888 }
889 if (pa < ps)
890 pa = p;
891 sz = (pa - ps) + 1;
892 pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
893 __alignof__(struct property));
894 if (allnextpp) {
895 pp->name = "name";
896 pp->length = sz;
897 pp->value = (unsigned char *)(pp + 1);
898 *prev_pp = pp;
899 prev_pp = &pp->next;
900 memcpy(pp->value, ps, sz - 1);
901 ((char *)pp->value)[sz - 1] = 0;
902 DBG("fixed up name for %s -> %s\n", pathp, pp->value);
903 }
904 }
804 if (allnextpp) { 905 if (allnextpp) {
805 *prev_pp = NULL; 906 *prev_pp = NULL;
806 np->name = get_property(np, "name", NULL); 907 np->name = get_property(np, "name", NULL);
@@ -812,11 +913,11 @@ static unsigned long __init unflatten_dt_node(unsigned long mem,
812 np->type = "<NULL>"; 913 np->type = "<NULL>";
813 } 914 }
814 while (tag == OF_DT_BEGIN_NODE) { 915 while (tag == OF_DT_BEGIN_NODE) {
815 mem = unflatten_dt_node(mem, p, np, allnextpp); 916 mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
816 tag = *((u32 *)(*p)); 917 tag = *((u32 *)(*p));
817 } 918 }
818 if (tag != OF_DT_END_NODE) { 919 if (tag != OF_DT_END_NODE) {
819 printk("Weird tag at start of node: %x\n", tag); 920 printk("Weird tag at end of node: %x\n", tag);
820 return mem; 921 return mem;
821 } 922 }
822 *p += 4; 923 *p += 4;
@@ -842,21 +943,32 @@ void __init unflatten_device_tree(void)
842 /* First pass, scan for size */ 943 /* First pass, scan for size */
843 start = ((unsigned long)initial_boot_params) + 944 start = ((unsigned long)initial_boot_params) +
844 initial_boot_params->off_dt_struct; 945 initial_boot_params->off_dt_struct;
845 size = unflatten_dt_node(0, &start, NULL, NULL); 946 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
947 size = (size | 3) + 1;
846 948
847 DBG(" size is %lx, allocating...\n", size); 949 DBG(" size is %lx, allocating...\n", size);
848 950
849 /* Allocate memory for the expanded device tree */ 951 /* Allocate memory for the expanded device tree */
850 mem = (unsigned long)abs_to_virt(lmb_alloc(size, 952 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
851 __alignof__(struct device_node))); 953 if (!mem) {
954 DBG("Couldn't allocate memory with lmb_alloc()!\n");
955 panic("Couldn't allocate memory with lmb_alloc()!\n");
956 }
957 mem = (unsigned long)abs_to_virt(mem);
958
959 ((u32 *)mem)[size / 4] = 0xdeadbeef;
960
852 DBG(" unflattening...\n", mem); 961 DBG(" unflattening...\n", mem);
853 962
854 /* Second pass, do actual unflattening */ 963 /* Second pass, do actual unflattening */
855 start = ((unsigned long)initial_boot_params) + 964 start = ((unsigned long)initial_boot_params) +
856 initial_boot_params->off_dt_struct; 965 initial_boot_params->off_dt_struct;
857 unflatten_dt_node(mem, &start, NULL, &allnextp); 966 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
858 if (*((u32 *)start) != OF_DT_END) 967 if (*((u32 *)start) != OF_DT_END)
859 printk(KERN_WARNING "Weird tag at end of tree: %x\n", *((u32 *)start)); 968 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
969 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
970 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
971 ((u32 *)mem)[size / 4] );
860 *allnextp = NULL; 972 *allnextp = NULL;
861 973
862 /* Get pointer to OF "/chosen" node for use everywhere */ 974 /* Get pointer to OF "/chosen" node for use everywhere */
@@ -880,7 +992,7 @@ void __init unflatten_device_tree(void)
880 992
881 993
882static int __init early_init_dt_scan_cpus(unsigned long node, 994static int __init early_init_dt_scan_cpus(unsigned long node,
883 const char *full_path, void *data) 995 const char *uname, int depth, void *data)
884{ 996{
885 char *type = get_flat_dt_prop(node, "device_type", NULL); 997 char *type = get_flat_dt_prop(node, "device_type", NULL);
886 u32 *prop; 998 u32 *prop;
@@ -947,13 +1059,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
947} 1059}
948 1060
949static int __init early_init_dt_scan_chosen(unsigned long node, 1061static int __init early_init_dt_scan_chosen(unsigned long node,
950 const char *full_path, void *data) 1062 const char *uname, int depth, void *data)
951{ 1063{
952 u32 *prop; 1064 u32 *prop;
953 u64 *prop64; 1065 u64 *prop64;
954 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end; 1066 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
955 1067
956 if (strcmp(full_path, "/chosen") != 0) 1068 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1069
1070 if (depth != 1 || strcmp(uname, "chosen") != 0)
957 return 0; 1071 return 0;
958 1072
959 /* get platform type */ 1073 /* get platform type */
@@ -1003,18 +1117,20 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1003} 1117}
1004 1118
1005static int __init early_init_dt_scan_root(unsigned long node, 1119static int __init early_init_dt_scan_root(unsigned long node,
1006 const char *full_path, void *data) 1120 const char *uname, int depth, void *data)
1007{ 1121{
1008 u32 *prop; 1122 u32 *prop;
1009 1123
1010 if (strcmp(full_path, "/") != 0) 1124 if (depth != 0)
1011 return 0; 1125 return 0;
1012 1126
1013 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL); 1127 prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL);
1014 dt_root_size_cells = (prop == NULL) ? 1 : *prop; 1128 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1015 1129 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1130
1016 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL); 1131 prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL);
1017 dt_root_addr_cells = (prop == NULL) ? 2 : *prop; 1132 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1133 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1018 1134
1019 /* break now */ 1135 /* break now */
1020 return 1; 1136 return 1;
@@ -1042,7 +1158,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1042 1158
1043 1159
1044static int __init early_init_dt_scan_memory(unsigned long node, 1160static int __init early_init_dt_scan_memory(unsigned long node,
1045 const char *full_path, void *data) 1161 const char *uname, int depth, void *data)
1046{ 1162{
1047 char *type = get_flat_dt_prop(node, "device_type", NULL); 1163 char *type = get_flat_dt_prop(node, "device_type", NULL);
1048 cell_t *reg, *endp; 1164 cell_t *reg, *endp;
@@ -1058,7 +1174,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
1058 1174
1059 endp = reg + (l / sizeof(cell_t)); 1175 endp = reg + (l / sizeof(cell_t));
1060 1176
1061 DBG("memory scan node %s ...\n", full_path); 1177 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1178 uname, l, reg[0], reg[1], reg[2], reg[3]);
1179
1062 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { 1180 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1063 unsigned long base, size; 1181 unsigned long base, size;
1064 1182
@@ -1469,10 +1587,11 @@ struct device_node *of_find_node_by_path(const char *path)
1469 struct device_node *np = allnodes; 1587 struct device_node *np = allnodes;
1470 1588
1471 read_lock(&devtree_lock); 1589 read_lock(&devtree_lock);
1472 for (; np != 0; np = np->allnext) 1590 for (; np != 0; np = np->allnext) {
1473 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0 1591 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1474 && of_node_get(np)) 1592 && of_node_get(np))
1475 break; 1593 break;
1594 }
1476 read_unlock(&devtree_lock); 1595 read_unlock(&devtree_lock);
1477 return np; 1596 return np;
1478} 1597}
@@ -1614,6 +1733,7 @@ static void of_node_release(struct kref *kref)
1614 kfree(node->intrs); 1733 kfree(node->intrs);
1615 kfree(node->addrs); 1734 kfree(node->addrs);
1616 kfree(node->full_name); 1735 kfree(node->full_name);
1736 kfree(node->data);
1617 kfree(node); 1737 kfree(node);
1618} 1738}
1619 1739
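 
With the flat-tree rework above, scan callbacks no longer receive a full path: they get the unit name plus a depth, and version 0x10 blobs are walked without the old 8-byte property alignment. A hedged example of a scanner written against the new signature; the property name is only illustrative, while the helpers are the ones modified in this hunk:

/* Sketch: find /chosen and pull out "linux,stdout-path" with the
 * reworked helpers.  Matching is by depth + unit name, exactly as in
 * early_init_dt_scan_chosen() above. */
static int __init early_scan_stdout(unsigned long node, const char *uname,
				    int depth, void *data)
{
	char *path;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;			/* keep scanning */

	path = get_flat_dt_prop(node, "linux,stdout-path", NULL);
	if (path != NULL)
		*(char **)data = path;
	return 1;				/* non-zero stops scan_flat_dt() */
}

/* usage:	char *stdout_path = NULL;
 *		scan_flat_dt(early_scan_stdout, &stdout_path);
 */
 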
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index dbbe6c79d8da..9979919cdf92 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -22,7 +22,6 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/version.h>
26#include <linux/threads.h> 25#include <linux/threads.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <linux/types.h> 27#include <linux/types.h>
@@ -892,7 +891,10 @@ static void __init prom_init_mem(void)
892 if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR ) 891 if ( RELOC(of_platform) == PLATFORM_PSERIES_LPAR )
893 RELOC(alloc_top) = RELOC(rmo_top); 892 RELOC(alloc_top) = RELOC(rmo_top);
894 else 893 else
895 RELOC(alloc_top) = RELOC(rmo_top) = min(0x40000000ul, RELOC(ram_top)); 894 /* Some RS64 machines have buggy firmware where claims up at 1GB
895 * fails. Cap at 768MB as a workaround. Still plenty of room.
896 */
897 RELOC(alloc_top) = RELOC(rmo_top) = min(0x30000000ul, RELOC(ram_top));
896 898
897 prom_printf("memory layout at init:\n"); 899 prom_printf("memory layout at init:\n");
898 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit)); 900 prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
@@ -1534,7 +1536,8 @@ static unsigned long __init dt_find_string(char *str)
1534 */ 1536 */
1535#define MAX_PROPERTY_NAME 64 1537#define MAX_PROPERTY_NAME 64
1536 1538
1537static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start, 1539static void __init scan_dt_build_strings(phandle node,
1540 unsigned long *mem_start,
1538 unsigned long *mem_end) 1541 unsigned long *mem_end)
1539{ 1542{
1540 unsigned long offset = reloc_offset(); 1543 unsigned long offset = reloc_offset();
@@ -1547,16 +1550,21 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1547 /* get and store all property names */ 1550 /* get and store all property names */
1548 prev_name = RELOC(""); 1551 prev_name = RELOC("");
1549 for (;;) { 1552 for (;;) {
1550 int rc;
1551
1552 /* 64 is max len of name including nul. */ 1553 /* 64 is max len of name including nul. */
1553 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); 1554 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
1554 rc = call_prom("nextprop", 3, 1, node, prev_name, namep); 1555 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
1555 if (rc != 1) {
1556 /* No more nodes: unwind alloc */ 1556 /* No more nodes: unwind alloc */
1557 *mem_start = (unsigned long)namep; 1557 *mem_start = (unsigned long)namep;
1558 break; 1558 break;
1559 } 1559 }
1560
1561 /* skip "name" */
1562 if (strcmp(namep, RELOC("name")) == 0) {
1563 *mem_start = (unsigned long)namep;
1564 prev_name = RELOC("name");
1565 continue;
1566 }
1567 /* get/create string entry */
1560 soff = dt_find_string(namep); 1568 soff = dt_find_string(namep);
1561 if (soff != 0) { 1569 if (soff != 0) {
1562 *mem_start = (unsigned long)namep; 1570 *mem_start = (unsigned long)namep;
@@ -1571,7 +1579,7 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1571 1579
1572 /* do all our children */ 1580 /* do all our children */
1573 child = call_prom("child", 1, 1, node); 1581 child = call_prom("child", 1, 1, node);
1574 while (child != (phandle)0) { 1582 while (child != 0) {
1575 scan_dt_build_strings(child, mem_start, mem_end); 1583 scan_dt_build_strings(child, mem_start, mem_end);
1576 child = call_prom("peer", 1, 1, child); 1584 child = call_prom("peer", 1, 1, child);
1577 } 1585 }
@@ -1580,16 +1588,13 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
1580static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start, 1588static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1581 unsigned long *mem_end) 1589 unsigned long *mem_end)
1582{ 1590{
1583 int l, align;
1584 phandle child; 1591 phandle child;
1585 char *namep, *prev_name, *sstart, *p, *ep; 1592 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
1586 unsigned long soff; 1593 unsigned long soff;
1587 unsigned char *valp; 1594 unsigned char *valp;
1588 unsigned long offset = reloc_offset(); 1595 unsigned long offset = reloc_offset();
1589 char pname[MAX_PROPERTY_NAME]; 1596 static char pname[MAX_PROPERTY_NAME];
1590 char *path; 1597 int l;
1591
1592 path = RELOC(prom_scratch);
1593 1598
1594 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end); 1599 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
1595 1600
@@ -1599,23 +1604,33 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1599 namep, *mem_end - *mem_start); 1604 namep, *mem_end - *mem_start);
1600 if (l >= 0) { 1605 if (l >= 0) {
1601 /* Didn't fit? Get more room. */ 1606 /* Didn't fit? Get more room. */
1602 if (l+1 > *mem_end - *mem_start) { 1607 if ((l+1) > (*mem_end - *mem_start)) {
1603 namep = make_room(mem_start, mem_end, l+1, 1); 1608 namep = make_room(mem_start, mem_end, l+1, 1);
1604 call_prom("package-to-path", 3, 1, node, namep, l); 1609 call_prom("package-to-path", 3, 1, node, namep, l);
1605 } 1610 }
1606 namep[l] = '\0'; 1611 namep[l] = '\0';
1612
1607 /* Fixup an Apple bug where they have bogus \0 chars in the 1613 /* Fixup an Apple bug where they have bogus \0 chars in the
1608 * middle of the path in some properties 1614 * middle of the path in some properties
1609 */ 1615 */
1610 for (p = namep, ep = namep + l; p < ep; p++) 1616 for (p = namep, ep = namep + l; p < ep; p++)
1611 if (*p == '\0') { 1617 if (*p == '\0') {
1612 memmove(p, p+1, ep - p); 1618 memmove(p, p+1, ep - p);
1613 ep--; l--; 1619 ep--; l--; p--;
1614 } 1620 }
1615 *mem_start = _ALIGN(((unsigned long) namep) + strlen(namep) + 1, 4); 1621
1622 /* now try to extract the unit name in that mess */
1623 for (p = namep, lp = NULL; *p; p++)
1624 if (*p == '/')
1625 lp = p + 1;
1626 if (lp != NULL)
1627 memmove(namep, lp, strlen(lp) + 1);
1628 *mem_start = _ALIGN(((unsigned long) namep) +
1629 strlen(namep) + 1, 4);
1616 } 1630 }
1617 1631
1618 /* get it again for debugging */ 1632 /* get it again for debugging */
1633 path = RELOC(prom_scratch);
1619 memset(path, 0, PROM_SCRATCH_SIZE); 1634 memset(path, 0, PROM_SCRATCH_SIZE);
1620 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1); 1635 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1621 1636
@@ -1623,23 +1638,27 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1623 prev_name = RELOC(""); 1638 prev_name = RELOC("");
1624 sstart = (char *)RELOC(dt_string_start); 1639 sstart = (char *)RELOC(dt_string_start);
1625 for (;;) { 1640 for (;;) {
1626 int rc; 1641 if (call_prom("nextprop", 3, 1, node, prev_name,
1627 1642 RELOC(pname)) != 1)
1628 rc = call_prom("nextprop", 3, 1, node, prev_name, pname);
1629 if (rc != 1)
1630 break; 1643 break;
1631 1644
1645 /* skip "name" */
1646 if (strcmp(RELOC(pname), RELOC("name")) == 0) {
1647 prev_name = RELOC("name");
1648 continue;
1649 }
1650
1632 /* find string offset */ 1651 /* find string offset */
1633 soff = dt_find_string(pname); 1652 soff = dt_find_string(RELOC(pname));
1634 if (soff == 0) { 1653 if (soff == 0) {
1635 prom_printf("WARNING: Can't find string index for <%s>, node %s\n", 1654 prom_printf("WARNING: Can't find string index for"
1636 pname, path); 1655 " <%s>, node %s\n", RELOC(pname), path);
1637 break; 1656 break;
1638 } 1657 }
1639 prev_name = sstart + soff; 1658 prev_name = sstart + soff;
1640 1659
1641 /* get length */ 1660 /* get length */
1642 l = call_prom("getproplen", 2, 1, node, pname); 1661 l = call_prom("getproplen", 2, 1, node, RELOC(pname));
1643 1662
1644 /* sanity checks */ 1663 /* sanity checks */
1645 if (l == PROM_ERROR) 1664 if (l == PROM_ERROR)
@@ -1648,7 +1667,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1648 prom_printf("WARNING: ignoring large property "); 1667 prom_printf("WARNING: ignoring large property ");
1649 /* It seems OF doesn't null-terminate the path :-( */ 1668 /* It seems OF doesn't null-terminate the path :-( */
1650 prom_printf("[%s] ", path); 1669 prom_printf("[%s] ", path);
1651 prom_printf("%s length 0x%x\n", pname, l); 1670 prom_printf("%s length 0x%x\n", RELOC(pname), l);
1652 continue; 1671 continue;
1653 } 1672 }
1654 1673
@@ -1658,17 +1677,16 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1658 dt_push_token(soff, mem_start, mem_end); 1677 dt_push_token(soff, mem_start, mem_end);
1659 1678
1660 /* push property content */ 1679 /* push property content */
1661 align = (l >= 8) ? 8 : 4; 1680 valp = make_room(mem_start, mem_end, l, 4);
1662 valp = make_room(mem_start, mem_end, l, align); 1681 call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
1663 call_prom("getprop", 4, 1, node, pname, valp, l);
1664 *mem_start = _ALIGN(*mem_start, 4); 1682 *mem_start = _ALIGN(*mem_start, 4);
1665 } 1683 }
1666 1684
1667 /* Add a "linux,phandle" property. */ 1685 /* Add a "linux,phandle" property. */
1668 soff = dt_find_string(RELOC("linux,phandle")); 1686 soff = dt_find_string(RELOC("linux,phandle"));
1669 if (soff == 0) 1687 if (soff == 0)
1670 prom_printf("WARNING: Can't find string index for <linux-phandle>" 1688 prom_printf("WARNING: Can't find string index for"
1671 " node %s\n", path); 1689 " <linux-phandle> node %s\n", path);
1672 else { 1690 else {
1673 dt_push_token(OF_DT_PROP, mem_start, mem_end); 1691 dt_push_token(OF_DT_PROP, mem_start, mem_end);
1674 dt_push_token(4, mem_start, mem_end); 1692 dt_push_token(4, mem_start, mem_end);
@@ -1679,7 +1697,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
1679 1697
1680 /* do all our children */ 1698 /* do all our children */
1681 child = call_prom("child", 1, 1, node); 1699 child = call_prom("child", 1, 1, node);
1682 while (child != (phandle)0) { 1700 while (child != 0) {
1683 scan_dt_build_struct(child, mem_start, mem_end); 1701 scan_dt_build_struct(child, mem_start, mem_end);
1684 child = call_prom("peer", 1, 1, child); 1702 child = call_prom("peer", 1, 1, child);
1685 } 1703 }
@@ -1718,7 +1736,8 @@ static void __init flatten_device_tree(void)
1718 1736
1719 /* Build header and make room for mem rsv map */ 1737 /* Build header and make room for mem rsv map */
1720 mem_start = _ALIGN(mem_start, 4); 1738 mem_start = _ALIGN(mem_start, 4);
1721 hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4); 1739 hdr = make_room(&mem_start, &mem_end,
1740 sizeof(struct boot_param_header), 4);
1722 RELOC(dt_header_start) = (unsigned long)hdr; 1741 RELOC(dt_header_start) = (unsigned long)hdr;
1723 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8); 1742 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
1724 1743
@@ -1731,11 +1750,11 @@ static void __init flatten_device_tree(void)
1731 namep = make_room(&mem_start, &mem_end, 16, 1); 1750 namep = make_room(&mem_start, &mem_end, 16, 1);
1732 strcpy(namep, RELOC("linux,phandle")); 1751 strcpy(namep, RELOC("linux,phandle"));
1733 mem_start = (unsigned long)namep + strlen(namep) + 1; 1752 mem_start = (unsigned long)namep + strlen(namep) + 1;
1734 RELOC(dt_string_end) = mem_start;
1735 1753
1736 /* Build string array */ 1754 /* Build string array */
1737 prom_printf("Building dt strings...\n"); 1755 prom_printf("Building dt strings...\n");
1738 scan_dt_build_strings(root, &mem_start, &mem_end); 1756 scan_dt_build_strings(root, &mem_start, &mem_end);
1757 RELOC(dt_string_end) = mem_start;
1739 1758
1740 /* Build structure */ 1759 /* Build structure */
1741 mem_start = PAGE_ALIGN(mem_start); 1760 mem_start = PAGE_ALIGN(mem_start);
@@ -1750,9 +1769,11 @@ static void __init flatten_device_tree(void)
1750 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start); 1769 hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
1751 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start); 1770 hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
1752 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start); 1771 hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
1772 hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
1753 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start); 1773 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
1754 hdr->version = OF_DT_VERSION; 1774 hdr->version = OF_DT_VERSION;
1755 hdr->last_comp_version = 1; 1775 /* Version 16 is not backward compatible */
1776 hdr->last_comp_version = 0x10;
1756 1777
1757 /* Reserve the whole thing and copy the reserve map in, we 1778 /* Reserve the whole thing and copy the reserve map in, we
1758 * also bump mem_reserve_cnt to cause further reservations to 1779 * also bump mem_reserve_cnt to cause further reservations to
@@ -1808,6 +1829,9 @@ static void __init fixup_device_tree(void)
1808 /* does it need fixup ? */ 1829 /* does it need fixup ? */
1809 if (prom_getproplen(i2c, "interrupts") > 0) 1830 if (prom_getproplen(i2c, "interrupts") > 0)
1810 return; 1831 return;
1832
1833 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
1834
1811 /* interrupt on this revision of u3 is number 0 and level */ 1835 /* interrupt on this revision of u3 is number 0 and level */
1812 interrupts[0] = 0; 1836 interrupts[0] = 0;
1813 interrupts[1] = 1; 1837 interrupts[1] = 1;
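 
The struct pass now stores only the unit name (the text after the last '/') for each node, and the unflattening side in prom.c rebuilds the full path and, when absent, the "name" property (the unit name minus any '@unit-address'). A standalone illustration of those two string operations, using a hypothetical path:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char full[] = "/ht@0,f2000000/pci@1";	/* hypothetical OF path */
	const char *unit = full, *p;
	char name[64];
	size_t n;

	/* 1. unit name = component after the last '/', as in
	 *    scan_dt_build_struct() above */
	for (p = full; *p; p++)
		if (*p == '/')
			unit = p + 1;
	printf("unit name: %s\n", unit);	/* pci@1 */

	/* 2. "name" property = unit name truncated at '@', as rebuilt in
	 *    unflatten_dt_node() when the property is missing */
	n = strcspn(unit, "@");
	memcpy(name, unit, n);
	name[n] = '\0';
	printf("name property: %s\n", name);	/* pci */

	return 0;
}
 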
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
index 2993f108d96d..85ed3188a91d 100644
--- a/arch/ppc64/kernel/ptrace.c
+++ b/arch/ppc64/kernel/ptrace.c
@@ -17,6 +17,7 @@
17 * this archive for more details. 17 * this archive for more details.
18 */ 18 */
19 19
20#include <linux/config.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -206,6 +207,19 @@ int sys_ptrace(long request, long pid, long addr, long data)
206 break; 207 break;
207 } 208 }
208 209
210 case PTRACE_GET_DEBUGREG: {
211 ret = -EINVAL;
212 /* We only support one DABR and no IABRS at the moment */
213 if (addr > 0)
214 break;
215 ret = put_user(child->thread.dabr,
216 (unsigned long __user *)data);
217 break;
218 }
219
220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data);
222
209 case PTRACE_DETACH: 223 case PTRACE_DETACH:
210 ret = ptrace_detach(child, data); 224 ret = ptrace_detach(child, data);
211 break; 225 break;
@@ -274,6 +288,20 @@ int sys_ptrace(long request, long pid, long addr, long data)
274 break; 288 break;
275 } 289 }
276 290
291#ifdef CONFIG_ALTIVEC
292 case PTRACE_GETVRREGS:
293 /* Get the child altivec register state. */
294 flush_altivec_to_thread(child);
295 ret = get_vrregs((unsigned long __user *)data, child);
296 break;
297
298 case PTRACE_SETVRREGS:
299 /* Set the child altivec register state. */
300 flush_altivec_to_thread(child);
301 ret = set_vrregs(child, (unsigned long __user *)data);
302 break;
303#endif
304
277 default: 305 default:
278 ret = ptrace_request(child, request, addr, data); 306 ret = ptrace_request(child, request, addr, data);
279 break; 307 break;
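 
From userspace, the new requests give a debugger a single data watchpoint. A hedged sketch follows: the request numbers are taken to be the asm/ptrace.h values, and the low DABR bits (read and write enables plus translation) are an assumption about the hardware encoding, not something this patch defines.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_SET_DEBUGREG
#define PTRACE_GET_DEBUGREG	25	/* assumed values */
#define PTRACE_SET_DEBUGREG	26
#endif

/* Set a read+write watchpoint on an 8-byte-aligned address. */
int set_watchpoint(pid_t pid, unsigned long addr)
{
	unsigned long dabr = (addr & ~7UL) | 0x7;	/* assumed DR|DW|BT bits */

	if (ptrace(PTRACE_SET_DEBUGREG, pid, 0UL, dabr) == -1) {
		perror("PTRACE_SET_DEBUGREG");
		return -1;
	}
	return 0;
}
 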
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/ppc64/kernel/ptrace32.c
index 16436426c7e2..fb8c22d6084a 100644
--- a/arch/ppc64/kernel/ptrace32.c
+++ b/arch/ppc64/kernel/ptrace32.c
@@ -17,6 +17,7 @@
17 * this archive for more details. 17 * this archive for more details.
18 */ 18 */
19 19
20#include <linux/config.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
@@ -337,6 +338,19 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
337 break; 338 break;
338 } 339 }
339 340
341 case PTRACE_GET_DEBUGREG: {
342 ret = -EINVAL;
343 /* We only support one DABR and no IABRS at the moment */
344 if (addr > 0)
345 break;
346 ret = put_user(child->thread.dabr, (u32 __user *)data);
347 break;
348 }
349
350 case PTRACE_SET_DEBUGREG:
351 ret = ptrace_set_debugreg(child, addr, data);
352 break;
353
340 case PTRACE_DETACH: 354 case PTRACE_DETACH:
341 ret = ptrace_detach(child, data); 355 ret = ptrace_detach(child, data);
342 break; 356 break;
@@ -405,9 +419,23 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
405 break; 419 break;
406 } 420 }
407 421
408 case PTRACE_GETEVENTMSG: 422 case PTRACE_GETEVENTMSG:
409 ret = put_user(child->ptrace_message, (unsigned int __user *) data); 423 ret = put_user(child->ptrace_message, (unsigned int __user *) data);
410 break; 424 break;
425
426#ifdef CONFIG_ALTIVEC
427 case PTRACE_GETVRREGS:
428 /* Get the child altivec register state. */
429 flush_altivec_to_thread(child);
430 ret = get_vrregs((unsigned long __user *)data, child);
431 break;
432
433 case PTRACE_SETVRREGS:
434 /* Set the child altivec register state. */
435 flush_altivec_to_thread(child);
436 ret = set_vrregs(child, (unsigned long __user *)data);
437 break;
438#endif
411 439
412 default: 440 default:
413 ret = ptrace_request(child, request, addr, data); 441 ret = ptrace_request(child, request, addr, data);
diff --git a/arch/ppc64/kernel/ras.c b/arch/ppc64/kernel/ras.c
index 3c00f7bfc1b5..41b97dc9cc0a 100644
--- a/arch/ppc64/kernel/ras.c
+++ b/arch/ppc64/kernel/ras.c
@@ -59,8 +59,6 @@ char mce_data_buf[RTAS_ERROR_LOG_MAX]
59/* This is true if we are using the firmware NMI handler (typically LPAR) */ 59/* This is true if we are using the firmware NMI handler (typically LPAR) */
60extern int fwnmi_active; 60extern int fwnmi_active;
61 61
62extern void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr);
63
64static int ras_get_sensor_state_token; 62static int ras_get_sensor_state_token;
65static int ras_check_exception_token; 63static int ras_check_exception_token;
66 64
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 1048817befb8..4a9719b48abe 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -48,7 +48,7 @@ static int write_pci_config;
48static int ibm_read_pci_config; 48static int ibm_read_pci_config;
49static int ibm_write_pci_config; 49static int ibm_write_pci_config;
50 50
51static int config_access_valid(struct device_node *dn, int where) 51static int config_access_valid(struct pci_dn *dn, int where)
52{ 52{
53 if (where < 256) 53 if (where < 256)
54 return 1; 54 return 1;
@@ -58,20 +58,37 @@ static int config_access_valid(struct device_node *dn, int where)
58 return 0; 58 return 0;
59} 59}
60 60
61static int of_device_available(struct device_node * dn)
62{
63 char * status;
64
65 status = get_property(dn, "status", NULL);
66
67 if (!status)
68 return 1;
69
70 if (!strcmp(status, "okay"))
71 return 1;
72
73 return 0;
74}
75
61static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) 76static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
62{ 77{
63 int returnval = -1; 78 int returnval = -1;
64 unsigned long buid, addr; 79 unsigned long buid, addr;
65 int ret; 80 int ret;
81 struct pci_dn *pdn;
66 82
67 if (!dn) 83 if (!dn || !dn->data)
68 return PCIBIOS_DEVICE_NOT_FOUND; 84 return PCIBIOS_DEVICE_NOT_FOUND;
69 if (!config_access_valid(dn, where)) 85 pdn = dn->data;
86 if (!config_access_valid(pdn, where))
70 return PCIBIOS_BAD_REGISTER_NUMBER; 87 return PCIBIOS_BAD_REGISTER_NUMBER;
71 88
72 addr = ((where & 0xf00) << 20) | (dn->busno << 16) | 89 addr = ((where & 0xf00) << 20) | (pdn->busno << 16) |
73 (dn->devfn << 8) | (where & 0xff); 90 (pdn->devfn << 8) | (where & 0xff);
74 buid = dn->phb->buid; 91 buid = pdn->phb->buid;
75 if (buid) { 92 if (buid) {
76 ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, 93 ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
77 addr, buid >> 32, buid & 0xffffffff, size); 94 addr, buid >> 32, buid & 0xffffffff, size);
@@ -83,8 +100,8 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
83 if (ret) 100 if (ret)
84 return PCIBIOS_DEVICE_NOT_FOUND; 101 return PCIBIOS_DEVICE_NOT_FOUND;
85 102
86 if (returnval == EEH_IO_ERROR_VALUE(size) 103 if (returnval == EEH_IO_ERROR_VALUE(size) &&
87 && eeh_dn_check_failure (dn, NULL)) 104 eeh_dn_check_failure (dn, NULL))
88 return PCIBIOS_DEVICE_NOT_FOUND; 105 return PCIBIOS_DEVICE_NOT_FOUND;
89 106
90 return PCIBIOS_SUCCESSFUL; 107 return PCIBIOS_SUCCESSFUL;
@@ -103,24 +120,28 @@ static int rtas_pci_read_config(struct pci_bus *bus,
103 120
104 /* Search only direct children of the bus */ 121 /* Search only direct children of the bus */
105 for (dn = busdn->child; dn; dn = dn->sibling) 122 for (dn = busdn->child; dn; dn = dn->sibling)
106 if (dn->devfn == devfn) 123 if (dn->data && PCI_DN(dn)->devfn == devfn
124 && of_device_available(dn))
107 return rtas_read_config(dn, where, size, val); 125 return rtas_read_config(dn, where, size, val);
126
108 return PCIBIOS_DEVICE_NOT_FOUND; 127 return PCIBIOS_DEVICE_NOT_FOUND;
109} 128}
110 129
111static int rtas_write_config(struct device_node *dn, int where, int size, u32 val) 130int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
112{ 131{
113 unsigned long buid, addr; 132 unsigned long buid, addr;
114 int ret; 133 int ret;
134 struct pci_dn *pdn;
115 135
116 if (!dn) 136 if (!dn || !dn->data)
117 return PCIBIOS_DEVICE_NOT_FOUND; 137 return PCIBIOS_DEVICE_NOT_FOUND;
118 if (!config_access_valid(dn, where)) 138 pdn = dn->data;
139 if (!config_access_valid(pdn, where))
119 return PCIBIOS_BAD_REGISTER_NUMBER; 140 return PCIBIOS_BAD_REGISTER_NUMBER;
120 141
121 addr = ((where & 0xf00) << 20) | (dn->busno << 16) | 142 addr = ((where & 0xf00) << 20) | (pdn->busno << 16) |
122 (dn->devfn << 8) | (where & 0xff); 143 (pdn->devfn << 8) | (where & 0xff);
123 buid = dn->phb->buid; 144 buid = pdn->phb->buid;
124 if (buid) { 145 if (buid) {
125 ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val); 146 ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
126 } else { 147 } else {
@@ -146,7 +167,8 @@ static int rtas_pci_write_config(struct pci_bus *bus,
146 167
147 /* Search only direct children of the bus */ 168 /* Search only direct children of the bus */
148 for (dn = busdn->child; dn; dn = dn->sibling) 169 for (dn = busdn->child; dn; dn = dn->sibling)
149 if (dn->devfn == devfn) 170 if (dn->data && PCI_DN(dn)->devfn == devfn
171 && of_device_available(dn))
150 return rtas_write_config(dn, where, size, val); 172 return rtas_write_config(dn, where, size, val);
151 return PCIBIOS_DEVICE_NOT_FOUND; 173 return PCIBIOS_DEVICE_NOT_FOUND;
152} 174}
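 
rtas_read_config() and rtas_write_config() above pack the extended config offset, bus number and devfn into a single RTAS config address. A worked example with hypothetical numbers, bus 0x40, device 3 function 1 (devfn 0x19), offset 0x124:

	addr = ((where & 0xf00) << 20) | (busno << 16) | (devfn << 8) | (where & 0xff)
	     = ((0x124 & 0xf00) << 20) | (0x40 << 16)  | (0x19 << 8)  | 0x24
	     =  0x10000000             | 0x00400000    | 0x00001900   | 0x24
	     =  0x10401924
 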
diff --git a/arch/ppc64/kernel/rtasd.c b/arch/ppc64/kernel/rtasd.c
index b0c3b829fe47..e26b0420b6dd 100644
--- a/arch/ppc64/kernel/rtasd.c
+++ b/arch/ppc64/kernel/rtasd.c
@@ -19,6 +19,7 @@
19#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
20#include <linux/spinlock.h> 20#include <linux/spinlock.h>
21#include <linux/cpu.h> 21#include <linux/cpu.h>
22#include <linux/delay.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/io.h> 25#include <asm/io.h>
@@ -412,8 +413,7 @@ static void do_event_scan_all_cpus(long delay)
412 413
413 /* Drop hotplug lock, and sleep for the specified delay */ 414 /* Drop hotplug lock, and sleep for the specified delay */
414 unlock_cpu_hotplug(); 415 unlock_cpu_hotplug();
415 set_current_state(TASK_INTERRUPTIBLE); 416 msleep_interruptible(delay);
416 schedule_timeout(delay);
417 lock_cpu_hotplug(); 417 lock_cpu_hotplug();
418 418
419 cpu = next_cpu(cpu, cpu_online_map); 419 cpu = next_cpu(cpu, cpu_online_map);
@@ -442,7 +442,7 @@ static int rtasd(void *unused)
442 442
443 printk(KERN_INFO "RTAS daemon started\n"); 443 printk(KERN_INFO "RTAS daemon started\n");
444 444
445 DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2); 445 DEBUG("will sleep for %d milliseconds\n", (30000/rtas_event_scan_rate));
446 446
447 /* See if we have any error stored in NVRAM */ 447 /* See if we have any error stored in NVRAM */
448 memset(logdata, 0, rtas_error_log_max); 448 memset(logdata, 0, rtas_error_log_max);
@@ -459,7 +459,7 @@ static int rtasd(void *unused)
459 } 459 }
460 460
461 /* First pass. */ 461 /* First pass. */
462 do_event_scan_all_cpus(HZ); 462 do_event_scan_all_cpus(1000);
463 463
464 if (surveillance_timeout != -1) { 464 if (surveillance_timeout != -1) {
465 DEBUG("enabling surveillance\n"); 465 DEBUG("enabling surveillance\n");
@@ -471,7 +471,7 @@ static int rtasd(void *unused)
471 * machines have problems if we call event-scan too 471 * machines have problems if we call event-scan too
472 * quickly. */ 472 * quickly. */
473 for (;;) 473 for (;;)
474 do_event_scan_all_cpus((HZ*60/rtas_event_scan_rate) / 2); 474 do_event_scan_all_cpus(30000/rtas_event_scan_rate);
475 475
476error: 476error:
477 /* Should delete proc entries */ 477 /* Should delete proc entries */
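 
The delay rework in this file is a pure unit conversion: the thread used to sleep (HZ*60/rtas_event_scan_rate)/2 jiffies and now sleeps 30000/rtas_event_scan_rate milliseconds via msleep_interruptible(), the same interval independent of HZ. For example, at a scan rate of 1 per minute:

	old: (HZ * 60 / 1) / 2 jiffies = 30 * HZ jiffies = 30 seconds
	new: 30000 / 1 ms              = 30000 ms        = 30 seconds
 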
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index d729fefa0df5..6ff52bc61325 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -35,6 +35,7 @@
35#include <linux/spinlock.h> 35#include <linux/spinlock.h>
36#include <linux/bcd.h> 36#include <linux/bcd.h>
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/delay.h>
38 39
39#include <asm/io.h> 40#include <asm/io.h>
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
@@ -351,8 +352,7 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm)
351 return; /* delay not allowed */ 352 return; /* delay not allowed */
352 } 353 }
353 wait_time = rtas_extended_busy_delay_time(error); 354 wait_time = rtas_extended_busy_delay_time(error);
354 set_current_state(TASK_INTERRUPTIBLE); 355 msleep_interruptible(wait_time);
355 schedule_timeout(wait_time);
356 error = RTAS_CLOCK_BUSY; 356 error = RTAS_CLOCK_BUSY;
357 } 357 }
358 } while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb)); 358 } while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
@@ -386,8 +386,7 @@ int rtas_set_rtc_time(struct rtc_time *tm)
386 if (in_interrupt()) 386 if (in_interrupt())
387 return 1; /* probably decrementer */ 387 return 1; /* probably decrementer */
388 wait_time = rtas_extended_busy_delay_time(error); 388 wait_time = rtas_extended_busy_delay_time(error);
389 set_current_state(TASK_INTERRUPTIBLE); 389 msleep_interruptible(wait_time);
390 schedule_timeout(wait_time);
391 error = RTAS_CLOCK_BUSY; 390 error = RTAS_CLOCK_BUSY;
392 } 391 }
393 } while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb)); 392 } while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/ppc64/kernel/scanlog.c
index 4d70736619c7..215bf8900304 100644
--- a/arch/ppc64/kernel/scanlog.c
+++ b/arch/ppc64/kernel/scanlog.c
@@ -25,6 +25,7 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/delay.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
29#include <asm/rtas.h> 30#include <asm/rtas.h>
30#include <asm/prom.h> 31#include <asm/prom.h>
@@ -77,7 +78,7 @@ static ssize_t scanlog_read(struct file *file, char __user *buf,
77 return -EFAULT; 78 return -EFAULT;
78 79
79 for (;;) { 80 for (;;) {
80 wait_time = HZ/2; /* default wait if no data */ 81 wait_time = 500; /* default wait if no data */
81 spin_lock(&rtas_data_buf_lock); 82 spin_lock(&rtas_data_buf_lock);
82 memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE); 83 memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE);
83 status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 84 status = rtas_call(ibm_scan_log_dump, 2, 1, NULL,
@@ -107,24 +108,14 @@ static ssize_t scanlog_read(struct file *file, char __user *buf,
107 break; 108 break;
108 default: 109 default:
109 if (status > 9900 && status <= 9905) { 110 if (status > 9900 && status <= 9905) {
110 /* No data. RTAS is hinting at a delay required 111 wait_time = rtas_extended_busy_delay_time(status);
111 * between 1-100000 milliseconds
112 */
113 int ms = 1;
114 for (; status > 9900; status--)
115 ms = ms * 10;
116 /* Use microseconds for reasonable accuracy */
117 ms *= 1000;
118 wait_time = ms / (1000000/HZ); /* round down is fine */
119 /* Fall through to sleep */
120 } else { 112 } else {
121 printk(KERN_ERR "scanlog: unknown error from rtas: %d\n", status); 113 printk(KERN_ERR "scanlog: unknown error from rtas: %d\n", status);
122 return -EIO; 114 return -EIO;
123 } 115 }
124 } 116 }
125 /* Apparently no data yet. Wait and try again. */ 117 /* Apparently no data yet. Wait and try again. */
126 set_current_state(TASK_INTERRUPTIBLE); 118 msleep_interruptible(wait_time);
127 schedule_timeout(wait_time);
128 } 119 }
129 /*NOTREACHED*/ 120 /*NOTREACHED*/
130} 121}
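scanlog_read() previously converted the RTAS extended-delay status (9901..9905) into a jiffy count by hand; the hunk above replaces that with rtas_extended_busy_delay_time() and sleeps via msleep_interruptible(), which is also why the default wait drops from HZ/2 jiffies to a plain 500 milliseconds. A stand-alone sketch of the conversion the removed block performed (the helper name is mine):

#include <stdio.h>

/*
 * RTAS extended-delay statuses: 9900 + n asks for a retry after
 * roughly 10^n milliseconds, so 9901 -> 10 ms ... 9905 -> 100 s.
 */
static unsigned int extended_busy_delay_ms(int status)
{
        unsigned int ms = 1;

        for (; status > 9900; status--)
                ms *= 10;
        return ms;
}

int main(void)
{
        int status;

        for (status = 9901; status <= 9905; status++)
                printf("status %d -> wait %u ms\n",
                       status, extended_busy_delay_ms(status));
        return 0;
}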
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index e9c24d2dbd91..5ac48bd64891 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -25,7 +25,7 @@
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/ioport.h> 26#include <linux/ioport.h>
27#include <linux/console.h> 27#include <linux/console.h>
28#include <linux/version.h> 28#include <linux/utsname.h>
29#include <linux/tty.h> 29#include <linux/tty.h>
30#include <linux/root_dev.h> 30#include <linux/root_dev.h>
31#include <linux/notifier.h> 31#include <linux/notifier.h>
@@ -89,7 +89,7 @@ extern void udbg_init_maple_realmode(void);
89#define EARLY_DEBUG_INIT() udbg_init_maple_realmode() 89#define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
90#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode() 90#define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
91#define EARLY_DEBUG_INIT() \ 91#define EARLY_DEBUG_INIT() \
92 do { ppc_md.udbg_putc = call_rtas_display_status_delay; } while(0) 92 do { udbg_putc = call_rtas_display_status_delay; } while(0)
93#endif 93#endif
94 94
95/* extern void *stab; */ 95/* extern void *stab; */
@@ -108,7 +108,6 @@ int boot_cpuid = 0;
108int boot_cpuid_phys = 0; 108int boot_cpuid_phys = 0;
109dev_t boot_dev; 109dev_t boot_dev;
110u64 ppc64_pft_size; 110u64 ppc64_pft_size;
111u64 ppc64_debug_switch;
112 111
113struct ppc64_caches ppc64_caches; 112struct ppc64_caches ppc64_caches;
114EXPORT_SYMBOL_GPL(ppc64_caches); 113EXPORT_SYMBOL_GPL(ppc64_caches);
@@ -154,34 +153,6 @@ struct screen_info screen_info = {
154 .orig_video_points = 16 153 .orig_video_points = 16
155}; 154};
156 155
157/*
158 * Initialize the PPCDBG state. Called before relocation has been enabled.
159 */
160void __init ppcdbg_initialize(void)
161{
162 ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
163 /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
164}
165
166/*
167 * Early boot console based on udbg
168 */
169static struct console udbg_console = {
170 .name = "udbg",
171 .write = udbg_console_write,
172 .flags = CON_PRINTBUFFER,
173 .index = -1,
174};
175static int early_console_initialized;
176
177void __init disable_early_printk(void)
178{
179 if (!early_console_initialized)
180 return;
181 unregister_console(&udbg_console);
182 early_console_initialized = 0;
183}
184
185#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) 156#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
186 157
187static int smt_enabled_cmdline; 158static int smt_enabled_cmdline;
@@ -425,12 +396,6 @@ void __init early_setup(unsigned long dt_ptr)
425 } 396 }
426 ppc_md = **mach; 397 ppc_md = **mach;
427 398
428 /* our udbg callbacks got overriden by the above, let's put them
429 * back in. Ultimately, I want those things to be split from the
430 * main ppc_md
431 */
432 EARLY_DEBUG_INIT();
433
434 DBG("Found, Initializing memory management...\n"); 399 DBG("Found, Initializing memory management...\n");
435 400
436 /* 401 /*
@@ -536,15 +501,19 @@ static void __init check_for_initrd(void)
536 501
537 DBG(" -> check_for_initrd()\n"); 502 DBG(" -> check_for_initrd()\n");
538 503
539 prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL); 504 if (of_chosen) {
540 if (prop != NULL) { 505 prop = (u64 *)get_property(of_chosen,
541 initrd_start = (unsigned long)__va(*prop); 506 "linux,initrd-start", NULL);
542 prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL);
543 if (prop != NULL) { 507 if (prop != NULL) {
544 initrd_end = (unsigned long)__va(*prop); 508 initrd_start = (unsigned long)__va(*prop);
545 initrd_below_start_ok = 1; 509 prop = (u64 *)get_property(of_chosen,
546 } else 510 "linux,initrd-end", NULL);
547 initrd_start = 0; 511 if (prop != NULL) {
512 initrd_end = (unsigned long)__va(*prop);
513 initrd_below_start_ok = 1;
514 } else
515 initrd_start = 0;
516 }
548 } 517 }
549 518
550 /* If we were passed an initrd, set the ROOT_DEV properly if the values 519 /* If we were passed an initrd, set the ROOT_DEV properly if the values
@@ -627,13 +596,12 @@ void __init setup_system(void)
627 * Initialize xmon 596 * Initialize xmon
628 */ 597 */
629#ifdef CONFIG_XMON_DEFAULT 598#ifdef CONFIG_XMON_DEFAULT
630 xmon_init(); 599 xmon_init(1);
631#endif 600#endif
632 /* 601 /*
633 * Register early console 602 * Register early console
634 */ 603 */
635 early_console_initialized = 1; 604 register_early_udbg_console();
636 register_console(&udbg_console);
637 605
638 /* Save unparsed command line copy for /proc/cmdline */ 606 /* Save unparsed command line copy for /proc/cmdline */
639 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 607 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
@@ -653,7 +621,7 @@ void __init setup_system(void)
653 smp_release_cpus(); 621 smp_release_cpus();
654#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */ 622#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
655 623
656 printk("Starting Linux PPC64 %s\n", UTS_RELEASE); 624 printk("Starting Linux PPC64 %s\n", system_utsname.version);
657 625
658 printk("-----------------------------------------------------\n"); 626 printk("-----------------------------------------------------\n");
659 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 627 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
@@ -1096,8 +1064,6 @@ void __init setup_arch(char **cmdline_p)
1096#define PPC64_LINUX_FUNCTION 0x0f000000 1064#define PPC64_LINUX_FUNCTION 0x0f000000
1097#define PPC64_IPL_MESSAGE 0xc0000000 1065#define PPC64_IPL_MESSAGE 0xc0000000
1098#define PPC64_TERM_MESSAGE 0xb0000000 1066#define PPC64_TERM_MESSAGE 0xb0000000
1099#define PPC64_ATTN_MESSAGE 0xa0000000
1100#define PPC64_DUMP_MESSAGE 0xd0000000
1101 1067
1102static void ppc64_do_msg(unsigned int src, const char *msg) 1068static void ppc64_do_msg(unsigned int src, const char *msg)
1103{ 1069{
@@ -1125,20 +1091,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
1125 printk("[terminate]%04x %s\n", src, msg); 1091 printk("[terminate]%04x %s\n", src, msg);
1126} 1092}
1127 1093
1128/* Print something that needs attention (device error, etc) */
1129void ppc64_attention_msg(unsigned int src, const char *msg)
1130{
1131 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_ATTN_MESSAGE|src, msg);
1132 printk("[attention]%04x %s\n", src, msg);
1133}
1134
1135/* Print a dump progress message. */
1136void ppc64_dump_msg(unsigned int src, const char *msg)
1137{
1138 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_DUMP_MESSAGE|src, msg);
1139 printk("[dump]%04x %s\n", src, msg);
1140}
1141
1142/* This should only be called on processor 0 during calibrate decr */ 1094/* This should only be called on processor 0 during calibrate decr */
1143void __init setup_default_decr(void) 1095void __init setup_default_decr(void)
1144{ 1096{
@@ -1315,7 +1267,7 @@ void __init generic_find_legacy_serial_ports(u64 *physport,
1315 1267
1316static struct platform_device serial_device = { 1268static struct platform_device serial_device = {
1317 .name = "serial8250", 1269 .name = "serial8250",
1318 .id = 0, 1270 .id = PLAT8250_DEV_PLATFORM,
1319 .dev = { 1271 .dev = {
1320 .platform_data = serial_ports, 1272 .platform_data = serial_ports,
1321 }, 1273 },
@@ -1343,11 +1295,13 @@ static int __init early_xmon(char *p)
1343 /* ensure xmon is enabled */ 1295 /* ensure xmon is enabled */
1344 if (p) { 1296 if (p) {
1345 if (strncmp(p, "on", 2) == 0) 1297 if (strncmp(p, "on", 2) == 0)
1346 xmon_init(); 1298 xmon_init(1);
1299 if (strncmp(p, "off", 3) == 0)
1300 xmon_init(0);
1347 if (strncmp(p, "early", 5) != 0) 1301 if (strncmp(p, "early", 5) != 0)
1348 return 0; 1302 return 0;
1349 } 1303 }
1350 xmon_init(); 1304 xmon_init(1);
1351 debugger(NULL); 1305 debugger(NULL);
1352 1306
1353 return 0; 1307 return 0;
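xmon_init() now takes an enable flag, so "xmon=off" on the command line can disable a CONFIG_XMON_DEFAULT kernel, and the early udbg console registration moves behind register_early_udbg_console(). A small user-space mirror of the new xmon= parsing; xmon_init() is stubbed here and the debugger(NULL) call is omitted.

#include <stdio.h>
#include <string.h>

/* Stand-in for xmon_init(int enable): 1 hooks the debugger, 0 unhooks it. */
static void xmon_init(int enable)
{
        printf("xmon %s\n", enable ? "enabled" : "disabled");
}

/* Mirrors the early_xmon() parsing after the hunk above. */
static int early_xmon(const char *p)
{
        if (p) {
                if (strncmp(p, "on", 2) == 0)
                        xmon_init(1);
                if (strncmp(p, "off", 3) == 0)
                        xmon_init(0);
                if (strncmp(p, "early", 5) != 0)
                        return 0;
        }
        xmon_init(1);   /* "xmon=early": enable and drop into the debugger */
        return 0;
}

int main(void)
{
        early_xmon("on");
        early_xmon("off");
        early_xmon("early");
        return 0;
}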
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index bf782276984c..347112cca3c0 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -481,10 +481,11 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
481 /* Set up Signal Frame */ 481 /* Set up Signal Frame */
482 ret = setup_rt_frame(sig, ka, info, oldset, regs); 482 ret = setup_rt_frame(sig, ka, info, oldset, regs);
483 483
484 if (ret && !(ka->sa.sa_flags & SA_NODEFER)) { 484 if (ret) {
485 spin_lock_irq(&current->sighand->siglock); 485 spin_lock_irq(&current->sighand->siglock);
486 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 486 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
487 sigaddset(&current->blocked,sig); 487 if (!(ka->sa.sa_flags & SA_NODEFER))
488 sigaddset(&current->blocked,sig);
488 recalc_sigpending(); 489 recalc_sigpending();
489 spin_unlock_irq(&current->sighand->siglock); 490 spin_unlock_irq(&current->sighand->siglock);
490 } 491 }
@@ -549,6 +550,15 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
549 /* Whee! Actually deliver the signal. */ 550 /* Whee! Actually deliver the signal. */
550 if (TRAP(regs) == 0x0C00) 551 if (TRAP(regs) == 0x0C00)
551 syscall_restart(regs, &ka); 552 syscall_restart(regs, &ka);
553
554 /*
555 * Reenable the DABR before delivering the signal to
556 * user space. The DABR will have been cleared if it
557 * triggered inside the kernel.
558 */
559 if (current->thread.dabr)
560 set_dabr(current->thread.dabr);
561
552 return handle_signal(signr, &ka, &info, oldset, regs); 562 return handle_signal(signr, &ka, &info, oldset, regs);
553 } 563 }
554 564
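The handle_signal() hunk fixes SA_NODEFER handling: the handler's sa_mask must always be OR-ed into the blocked set, and SA_NODEFER only exempts the signal being delivered from being added on top. (The other hunk re-arms the DABR watchpoint before returning to user space.) A user-space sketch of the corrected blocking rule, using POSIX sigset primitives in place of the kernel's sigorsets():

#include <signal.h>
#include <stdio.h>

/*
 * After the fix: sa_mask is always added to the blocked set; the
 * delivered signal itself is added only when SA_NODEFER is clear.
 */
static void block_for_handler(sigset_t *blocked, const struct sigaction *ka,
                              int sig)
{
        int s;

        for (s = 1; s < 32; s++)        /* classic signals are enough here */
                if (sigismember(&ka->sa_mask, s))
                        sigaddset(blocked, s);

        if (!(ka->sa_flags & SA_NODEFER))
                sigaddset(blocked, sig);
}

int main(void)
{
        struct sigaction ka;
        sigset_t blocked;

        sigemptyset(&blocked);
        sigemptyset(&ka.sa_mask);
        sigaddset(&ka.sa_mask, SIGUSR1);
        ka.sa_flags = SA_NODEFER;

        block_for_handler(&blocked, &ka, SIGINT);
        /* sa_mask (SIGUSR1) is blocked; SIGINT is not, because of SA_NODEFER */
        printf("SIGUSR1 blocked: %d, SIGINT blocked: %d\n",
               sigismember(&blocked, SIGUSR1), sigismember(&blocked, SIGINT));
        return 0;
}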
diff --git a/arch/ppc64/kernel/signal32.c b/arch/ppc64/kernel/signal32.c
index 3c2fa5c284c0..a8b7a5a56bb4 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/ppc64/kernel/signal32.c
@@ -970,17 +970,26 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
970 newsp = regs->gpr[1]; 970 newsp = regs->gpr[1];
971 newsp &= ~0xfUL; 971 newsp &= ~0xfUL;
972 972
973 /*
974 * Reenable the DABR before delivering the signal to
975 * user space. The DABR will have been cleared if it
976 * triggered inside the kernel.
977 */
978 if (current->thread.dabr)
979 set_dabr(current->thread.dabr);
980
973 /* Whee! Actually deliver the signal. */ 981 /* Whee! Actually deliver the signal. */
974 if (ka.sa.sa_flags & SA_SIGINFO) 982 if (ka.sa.sa_flags & SA_SIGINFO)
975 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp); 983 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp);
976 else 984 else
977 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp); 985 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp);
978 986
979 if (ret && !(ka.sa.sa_flags & SA_NODEFER)) { 987 if (ret) {
980 spin_lock_irq(&current->sighand->siglock); 988 spin_lock_irq(&current->sighand->siglock);
981 sigorsets(&current->blocked, &current->blocked, 989 sigorsets(&current->blocked, &current->blocked,
982 &ka.sa.sa_mask); 990 &ka.sa.sa_mask);
983 sigaddset(&current->blocked, signr); 991 if (!(ka.sa.sa_flags & SA_NODEFER))
992 sigaddset(&current->blocked, signr);
984 recalc_sigpending(); 993 recalc_sigpending();
985 spin_unlock_irq(&current->sighand->siglock); 994 spin_unlock_irq(&current->sighand->siglock);
986 } 995 }
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 206619080e66..e93c13458910 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -708,62 +708,9 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu
708 compat_ptr(ubuf)); 708 compat_ptr(ubuf));
709} 709}
710 710
711#define IOBASE_BRIDGE_NUMBER 0
712#define IOBASE_MEMORY 1
713#define IOBASE_IO 2
714#define IOBASE_ISA_IO 3
715#define IOBASE_ISA_MEM 4
716
717asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) 711asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
718{ 712{
719#ifdef CONFIG_PCI 713 return sys_pciconfig_iobase(which, in_bus, in_devfn);
720 struct pci_controller* hose;
721 struct list_head *ln;
722 struct pci_bus *bus = NULL;
723 struct device_node *hose_node;
724
725 /* Argh ! Please forgive me for that hack, but that's the
726 * simplest way to get existing XFree to not lockup on some
727 * G5 machines... So when something asks for bus 0 io base
728 * (bus 0 is HT root), we return the AGP one instead.
729 */
730#ifdef CONFIG_PPC_PMAC
731 if (systemcfg->platform == PLATFORM_POWERMAC &&
732 machine_is_compatible("MacRISC4"))
733 if (in_bus == 0)
734 in_bus = 0xf0;
735#endif /* CONFIG_PPC_PMAC */
736
737 /* That syscall isn't quite compatible with PCI domains, but it's
738 * used on pre-domains setup. We return the first match
739 */
740
741 for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
742 bus = pci_bus_b(ln);
743 if (in_bus >= bus->number && in_bus < (bus->number + bus->subordinate))
744 break;
745 bus = NULL;
746 }
747 if (bus == NULL || bus->sysdata == NULL)
748 return -ENODEV;
749
750 hose_node = (struct device_node *)bus->sysdata;
751 hose = hose_node->phb;
752
753 switch (which) {
754 case IOBASE_BRIDGE_NUMBER:
755 return (long)hose->first_busno;
756 case IOBASE_MEMORY:
757 return (long)hose->pci_mem_offset;
758 case IOBASE_IO:
759 return (long)hose->io_base_phys;
760 case IOBASE_ISA_IO:
761 return (long)isa_io_base;
762 case IOBASE_ISA_MEM:
763 return -EINVAL;
764 }
765#endif /* CONFIG_PCI */
766 return -EOPNOTSUPP;
767} 714}
768 715
769 716
@@ -867,37 +814,6 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
867 return sys_lseek(fd, (int)offset, origin); 814 return sys_lseek(fd, (int)offset, origin);
868} 815}
869 816
870/*
871 * This is just a version for 32-bit applications which does
872 * not force O_LARGEFILE on.
873 */
874asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
875{
876 char * tmp;
877 int fd, error;
878
879 tmp = getname(filename);
880 fd = PTR_ERR(tmp);
881 if (!IS_ERR(tmp)) {
882 fd = get_unused_fd();
883 if (fd >= 0) {
884 struct file * f = filp_open(tmp, flags, mode);
885 error = PTR_ERR(f);
886 if (IS_ERR(f))
887 goto out_error;
888 fd_install(fd, f);
889 }
890out:
891 putname(tmp);
892 }
893 return fd;
894
895out_error:
896 put_unused_fd(fd);
897 fd = error;
898 goto out;
899}
900
901/* Note: it is necessary to treat bufsiz as an unsigned int, 817/* Note: it is necessary to treat bufsiz as an unsigned int,
902 * with the corresponding cast to a signed int to insure that the 818 * with the corresponding cast to a signed int to insure that the
903 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 819 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
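sys32_pciconfig_iobase() loses its private copy of the host-bridge walk and simply forwards to the native sys_pciconfig_iobase(), and the private sys32_open() goes the same way; the 32-bit arguments are already plain u32 values, so nothing needs translating. A toy sketch of that thin-shim pattern (the native function here is a stub, not the kernel's):

#include <stdio.h>

/* Stub for the native 64-bit syscall; the real one lives elsewhere in the tree. */
static long sys_pciconfig_iobase(long which, unsigned long in_bus,
                                 unsigned long in_devfn)
{
        printf("native: which=%ld bus=%lu devfn=%lu\n", which, in_bus, in_devfn);
        return 0;
}

/*
 * The 32-bit entry point is now a thin shim: its u32 arguments widen
 * implicitly and are handed straight to the native implementation.
 */
static int sys32_pciconfig_iobase(unsigned int which, unsigned int in_bus,
                                  unsigned int in_devfn)
{
        return (int)sys_pciconfig_iobase(which, in_bus, in_devfn);
}

int main(void)
{
        return sys32_pciconfig_iobase(2, 0, 0x08);
}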
diff --git a/arch/ppc64/kernel/syscalls.c b/arch/ppc64/kernel/syscalls.c
index a8cbb202b8cd..05f16633bd2c 100644
--- a/arch/ppc64/kernel/syscalls.c
+++ b/arch/ppc64/kernel/syscalls.c
@@ -46,10 +46,6 @@
46 46
47extern unsigned long wall_jiffies; 47extern unsigned long wall_jiffies;
48 48
49void
50check_bugs(void)
51{
52}
53 49
54/* 50/*
55 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 51 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 02b8ac4e0168..6654b350979c 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -13,6 +13,7 @@
13#include <asm/current.h> 13#include <asm/current.h>
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/cputable.h> 15#include <asm/cputable.h>
16#include <asm/firmware.h>
16#include <asm/hvcall.h> 17#include <asm/hvcall.h>
17#include <asm/prom.h> 18#include <asm/prom.h>
18#include <asm/systemcfg.h> 19#include <asm/systemcfg.h>
@@ -100,6 +101,8 @@ static int __init setup_smt_snooze_delay(char *str)
100} 101}
101__setup("smt-snooze-delay=", setup_smt_snooze_delay); 102__setup("smt-snooze-delay=", setup_smt_snooze_delay);
102 103
104#endif /* CONFIG_PPC_MULTIPLATFORM */
105
103/* 106/*
104 * Enabling PMCs will slow partition context switch times so we only do 107 * Enabling PMCs will slow partition context switch times so we only do
105 * it the first time we write to the PMCs. 108 * it the first time we write to the PMCs.
@@ -109,65 +112,15 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
109 112
110void ppc64_enable_pmcs(void) 113void ppc64_enable_pmcs(void)
111{ 114{
112 unsigned long hid0;
113#ifdef CONFIG_PPC_PSERIES
114 unsigned long set, reset;
115#endif /* CONFIG_PPC_PSERIES */
116
117 /* Only need to enable them once */ 115 /* Only need to enable them once */
118 if (__get_cpu_var(pmcs_enabled)) 116 if (__get_cpu_var(pmcs_enabled))
119 return; 117 return;
120 118
121 __get_cpu_var(pmcs_enabled) = 1; 119 __get_cpu_var(pmcs_enabled) = 1;
122 120
123 switch (systemcfg->platform) { 121 if (ppc_md.enable_pmcs)
124 case PLATFORM_PSERIES: 122 ppc_md.enable_pmcs();
125 case PLATFORM_POWERMAC:
126 hid0 = mfspr(HID0);
127 hid0 |= 1UL << (63 - 20);
128
129 /* POWER4 requires the following sequence */
130 asm volatile(
131 "sync\n"
132 "mtspr %1, %0\n"
133 "mfspr %0, %1\n"
134 "mfspr %0, %1\n"
135 "mfspr %0, %1\n"
136 "mfspr %0, %1\n"
137 "mfspr %0, %1\n"
138 "mfspr %0, %1\n"
139 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
140 "memory");
141 break;
142
143#ifdef CONFIG_PPC_PSERIES
144 case PLATFORM_PSERIES_LPAR:
145 set = 1UL << 63;
146 reset = 0;
147 plpar_hcall_norets(H_PERFMON, set, reset);
148 break;
149#endif /* CONFIG_PPC_PSERIES */
150
151 default:
152 break;
153 }
154
155#ifdef CONFIG_PPC_PSERIES
156 /* instruct hypervisor to maintain PMCs */
157 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
158 get_paca()->lppaca.pmcregs_in_use = 1;
159#endif /* CONFIG_PPC_PSERIES */
160} 123}
161
162#else
163
164/* PMC stuff */
165void ppc64_enable_pmcs(void)
166{
167 /* XXX Implement for iseries */
168}
169#endif /* CONFIG_PPC_MULTIPLATFORM */
170
171EXPORT_SYMBOL(ppc64_enable_pmcs); 124EXPORT_SYMBOL(ppc64_enable_pmcs);
172 125
173/* XXX convert to rusty's on_one_cpu */ 126/* XXX convert to rusty's on_one_cpu */
@@ -262,18 +215,23 @@ static void register_cpu_online(unsigned int cpu)
262 if (cpu_has_feature(CPU_FTR_MMCRA)) 215 if (cpu_has_feature(CPU_FTR_MMCRA))
263 sysdev_create_file(s, &attr_mmcra); 216 sysdev_create_file(s, &attr_mmcra);
264 217
265 sysdev_create_file(s, &attr_pmc1); 218 if (cur_cpu_spec->num_pmcs >= 1)
266 sysdev_create_file(s, &attr_pmc2); 219 sysdev_create_file(s, &attr_pmc1);
267 sysdev_create_file(s, &attr_pmc3); 220 if (cur_cpu_spec->num_pmcs >= 2)
268 sysdev_create_file(s, &attr_pmc4); 221 sysdev_create_file(s, &attr_pmc2);
269 sysdev_create_file(s, &attr_pmc5); 222 if (cur_cpu_spec->num_pmcs >= 3)
270 sysdev_create_file(s, &attr_pmc6); 223 sysdev_create_file(s, &attr_pmc3);
271 224 if (cur_cpu_spec->num_pmcs >= 4)
272 if (cpu_has_feature(CPU_FTR_PMC8)) { 225 sysdev_create_file(s, &attr_pmc4);
226 if (cur_cpu_spec->num_pmcs >= 5)
227 sysdev_create_file(s, &attr_pmc5);
228 if (cur_cpu_spec->num_pmcs >= 6)
229 sysdev_create_file(s, &attr_pmc6);
230 if (cur_cpu_spec->num_pmcs >= 7)
273 sysdev_create_file(s, &attr_pmc7); 231 sysdev_create_file(s, &attr_pmc7);
232 if (cur_cpu_spec->num_pmcs >= 8)
274 sysdev_create_file(s, &attr_pmc8); 233 sysdev_create_file(s, &attr_pmc8);
275 } 234
276
277 if (cpu_has_feature(CPU_FTR_SMT)) 235 if (cpu_has_feature(CPU_FTR_SMT))
278 sysdev_create_file(s, &attr_purr); 236 sysdev_create_file(s, &attr_purr);
279} 237}
@@ -299,17 +257,22 @@ static void unregister_cpu_online(unsigned int cpu)
299 if (cpu_has_feature(CPU_FTR_MMCRA)) 257 if (cpu_has_feature(CPU_FTR_MMCRA))
300 sysdev_remove_file(s, &attr_mmcra); 258 sysdev_remove_file(s, &attr_mmcra);
301 259
302 sysdev_remove_file(s, &attr_pmc1); 260 if (cur_cpu_spec->num_pmcs >= 1)
303 sysdev_remove_file(s, &attr_pmc2); 261 sysdev_remove_file(s, &attr_pmc1);
304 sysdev_remove_file(s, &attr_pmc3); 262 if (cur_cpu_spec->num_pmcs >= 2)
305 sysdev_remove_file(s, &attr_pmc4); 263 sysdev_remove_file(s, &attr_pmc2);
306 sysdev_remove_file(s, &attr_pmc5); 264 if (cur_cpu_spec->num_pmcs >= 3)
307 sysdev_remove_file(s, &attr_pmc6); 265 sysdev_remove_file(s, &attr_pmc3);
308 266 if (cur_cpu_spec->num_pmcs >= 4)
309 if (cpu_has_feature(CPU_FTR_PMC8)) { 267 sysdev_remove_file(s, &attr_pmc4);
268 if (cur_cpu_spec->num_pmcs >= 5)
269 sysdev_remove_file(s, &attr_pmc5);
270 if (cur_cpu_spec->num_pmcs >= 6)
271 sysdev_remove_file(s, &attr_pmc6);
272 if (cur_cpu_spec->num_pmcs >= 7)
310 sysdev_remove_file(s, &attr_pmc7); 273 sysdev_remove_file(s, &attr_pmc7);
274 if (cur_cpu_spec->num_pmcs >= 8)
311 sysdev_remove_file(s, &attr_pmc8); 275 sysdev_remove_file(s, &attr_pmc8);
312 }
313 276
314 if (cpu_has_feature(CPU_FTR_SMT)) 277 if (cpu_has_feature(CPU_FTR_SMT))
315 sysdev_remove_file(s, &attr_purr); 278 sysdev_remove_file(s, &attr_purr);
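register_cpu_online()/unregister_cpu_online() now key each PMC attribute off cur_cpu_spec->num_pmcs instead of a CPU_FTR_PMC8 special case, and ppc64_enable_pmcs() defers the platform details to a ppc_md.enable_pmcs() hook. The chain of num_pmcs comparisons is equivalent to walking an attribute table up to num_pmcs; a user-space sketch of that idea, with sysdev_create_file() replaced by a print and the attribute names made up:

#include <stdio.h>

#define MAX_PMCS 8

/* Stand-in for sysdev_create_file(): print instead of creating a file. */
static void create_attr(const char *name)
{
        printf("registering %s\n", name);
}

/*
 * Same effect as the if (num_pmcs >= n) chain above, expressed as a
 * walk over an attribute table.
 */
static void register_pmcs(int num_pmcs)
{
        static const char *pmc_attr[MAX_PMCS] = {
                "pmc1", "pmc2", "pmc3", "pmc4",
                "pmc5", "pmc6", "pmc7", "pmc8",
        };
        int i;

        for (i = 0; i < num_pmcs && i < MAX_PMCS; i++)
                create_attr(pmc_attr[i]);
}

int main(void)
{
        register_pmcs(6);       /* e.g. a CPU that exposes six PMCs */
        return 0;
}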
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 909462e1adea..9939c206afa4 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -51,7 +51,6 @@
51#include <linux/cpu.h> 51#include <linux/cpu.h>
52#include <linux/security.h> 52#include <linux/security.h>
53 53
54#include <asm/segment.h>
55#include <asm/io.h> 54#include <asm/io.h>
56#include <asm/processor.h> 55#include <asm/processor.h>
57#include <asm/nvram.h> 56#include <asm/nvram.h>
@@ -67,6 +66,7 @@
67#include <asm/prom.h> 66#include <asm/prom.h>
68#include <asm/sections.h> 67#include <asm/sections.h>
69#include <asm/systemcfg.h> 68#include <asm/systemcfg.h>
69#include <asm/firmware.h>
70 70
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
72 72
@@ -128,7 +128,7 @@ static __inline__ void timer_check_rtc(void)
128 * We should have an rtc call that only sets the minutes and 128 * We should have an rtc call that only sets the minutes and
129 * seconds like on Intel to avoid problems with non UTC clocks. 129 * seconds like on Intel to avoid problems with non UTC clocks.
130 */ 130 */
131 if ( (time_status & STA_UNSYNC) == 0 && 131 if (ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 && 132 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) { 134 jiffies - wall_jiffies == 1) {
@@ -370,13 +370,11 @@ int timer_interrupt(struct pt_regs * regs)
370 process_hvlpevents(regs); 370 process_hvlpevents(regs);
371#endif 371#endif
372 372
373/* collect purr register values often, for accurate calculations */ 373 /* collect purr register values often, for accurate calculations */
374#if defined(CONFIG_PPC_PSERIES) 374 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
376 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
377 cu->current_tb = mfspr(SPRN_PURR); 376 cu->current_tb = mfspr(SPRN_PURR);
378 } 377 }
379#endif
380 378
381 irq_exit(); 379 irq_exit();
382 380
@@ -437,10 +435,7 @@ int do_settimeofday(struct timespec *tv)
437 */ 435 */
438 last_rtc_update = new_sec - 658; 436 last_rtc_update = new_sec - 658;
439 437
440 time_adjust = 0; /* stop active adjtime() */ 438 ntp_clear();
441 time_status |= STA_UNSYNC;
442 time_maxerror = NTP_PHASE_LIMIT;
443 time_esterror = NTP_PHASE_LIMIT;
444 439
445 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
446 do_gtod.varp->tb_to_xs ); 441 do_gtod.varp->tb_to_xs );
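time.c drops the open-coded STA_UNSYNC/time_adjust bookkeeping in favour of the ntp_synced() and ntp_clear() helpers, and tests FW_FEATURE_SPLPAR through firmware_has_feature(). A stand-alone rendering of the RTC-update gate in timer_check_rtc(), with ntp_synced() stubbed and the HZ value picked arbitrarily for the demo:

#include <stdio.h>
#include <stdlib.h>

#define HZ 250                  /* assumed tick rate, only for the demo */

/* Stub: whether NTP currently considers the clock disciplined. */
static int ntp_synced(void) { return 1; }

/*
 * Mirrors the condition in timer_check_rtc() after the hunk above:
 * update the RTC only when NTP is synced, ~11 minutes have passed,
 * and we are within half a tick of a second boundary.
 */
static int should_update_rtc(long now_sec, long last_rtc_update,
                             long tv_nsec, long jiffies, long wall_jiffies)
{
        long usec = tv_nsec / 1000;

        return ntp_synced() &&
               now_sec - last_rtc_update >= 659 &&
               labs(usec - (1000000 - 1000000 / HZ)) < 500000 / HZ &&
               jiffies - wall_jiffies == 1;
}

int main(void)
{
        printf("update rtc? %d\n",
               should_update_rtc(1000, 300, 996000000, 42, 41));
        return 0;
}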
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index a8d5e83ee89f..7467ae508e6e 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -30,6 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/kprobes.h>
33#include <asm/kdebug.h> 34#include <asm/kdebug.h>
34 35
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
@@ -220,7 +221,7 @@ void instruction_breakpoint_exception(struct pt_regs *regs)
220 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
221} 222}
222 223
223void single_step_exception(struct pt_regs *regs) 224void __kprobes single_step_exception(struct pt_regs *regs)
224{ 225{
225 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */ 226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
226 227
@@ -398,7 +399,7 @@ check_bug_trap(struct pt_regs *regs)
398 return 0; 399 return 0;
399} 400}
400 401
401void program_check_exception(struct pt_regs *regs) 402void __kprobes program_check_exception(struct pt_regs *regs)
402{ 403{
403 if (debugger_fault_handler(regs)) 404 if (debugger_fault_handler(regs))
404 return; 405 return;
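single_step_exception() and program_check_exception() are tagged __kprobes so they land in a section the kprobes core refuses to instrument, preventing a probe from recursing into its own exception path. A tiny stand-alone illustration of the section-attribute mechanism; the macro definition below is a local stand-in, not copied from the kernel headers:

#include <stdio.h>

/*
 * In the kernel, __kprobes places a function in the .kprobes.text
 * section so that probes cannot be planted on it.
 */
#define __kprobes __attribute__((__section__(".kprobes.text")))

static void __kprobes single_step_handler(void)
{
        /* exception handling that must never itself be probed */
        puts("handling single step");
}

int main(void)
{
        single_step_handler();
        return 0;
}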
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/ppc64/kernel/u3_iommu.c
index b6e3bca4102d..41ea09cb9ac7 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/ppc64/kernel/u3_iommu.c
@@ -276,7 +276,7 @@ static void iommu_dev_setup_u3(struct pci_dev *dev)
276 dn = pci_device_to_OF_node(dev); 276 dn = pci_device_to_OF_node(dev);
277 277
278 if (dn) 278 if (dn)
279 dn->iommu_table = &iommu_table_u3; 279 PCI_DN(dn)->iommu_table = &iommu_table_u3;
280} 280}
281 281
282static void iommu_bus_setup_u3(struct pci_bus *bus) 282static void iommu_bus_setup_u3(struct pci_bus *bus)
@@ -291,7 +291,7 @@ static void iommu_bus_setup_u3(struct pci_bus *bus)
291 dn = pci_bus_to_OF_node(bus); 291 dn = pci_bus_to_OF_node(bus);
292 292
293 if (dn) 293 if (dn)
294 dn->iommu_table = &iommu_table_u3; 294 PCI_DN(dn)->iommu_table = &iommu_table_u3;
295} 295}
296 296
297static void iommu_dev_setup_null(struct pci_dev *dev) { } 297static void iommu_dev_setup_null(struct pci_dev *dev) { }
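The iommu_table pointer moves out of struct device_node into the per-node PCI data reached through PCI_DN(dn). A simplified sketch of that accessor pattern; the structures and the macro body are illustrative stand-ins, not the ppc64 headers:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the real structures. */
struct iommu_table { int id; };

struct pci_dn {                         /* per-device-node PCI bookkeeping */
        struct iommu_table *iommu_table;
};

struct device_node {
        void *data;                     /* points at the pci_dn, if any */
};

/* Accessor in the spirit of PCI_DN(): fetch the PCI data for a node. */
#define PCI_DN(dn)  ((struct pci_dn *)(dn)->data)

int main(void)
{
        static struct iommu_table u3_table = { .id = 1 };
        struct device_node dn = { .data = malloc(sizeof(struct pci_dn)) };

        PCI_DN(&dn)->iommu_table = &u3_table;   /* as in the hunk above */
        printf("table id %d\n", PCI_DN(&dn)->iommu_table->id);
        free(dn.data);
        return 0;
}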
diff --git a/arch/ppc64/kernel/udbg.c b/arch/ppc64/kernel/udbg.c
index c0da45540f0f..d49c3613c8ec 100644
--- a/arch/ppc64/kernel/udbg.c
+++ b/arch/ppc64/kernel/udbg.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * NS16550 Serial Port (uart) debugging stuff. 2 * polling mode stateless debugging stuff, originally for NS16550 Serial Ports
3 * 3 *
4 * c 2001 PPC 64 Team, IBM Corp 4 * c 2001 PPC 64 Team, IBM Corp
5 * 5 *
@@ -13,249 +13,24 @@
13#define WANT_PPCDBG_TAB /* Only defined here */ 13#define WANT_PPCDBG_TAB /* Only defined here */
14#include <linux/config.h> 14#include <linux/config.h>
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/sched.h>
17#include <linux/console.h>
16#include <asm/ppcdebug.h> 18#include <asm/ppcdebug.h>
17#include <asm/processor.h> 19#include <asm/processor.h>
18#include <asm/uaccess.h>
19#include <asm/machdep.h>
20#include <asm/io.h>
21#include <asm/prom.h>
22#include <asm/pmac_feature.h>
23 20
24extern u8 real_readb(volatile u8 __iomem *addr); 21void (*udbg_putc)(unsigned char c);
25extern void real_writeb(u8 data, volatile u8 __iomem *addr); 22unsigned char (*udbg_getc)(void);
26 23int (*udbg_getc_poll)(void);
27struct NS16550 {
28 /* this struct must be packed */
29 unsigned char rbr; /* 0 */
30 unsigned char ier; /* 1 */
31 unsigned char fcr; /* 2 */
32 unsigned char lcr; /* 3 */
33 unsigned char mcr; /* 4 */
34 unsigned char lsr; /* 5 */
35 unsigned char msr; /* 6 */
36 unsigned char scr; /* 7 */
37};
38
39#define thr rbr
40#define iir fcr
41#define dll rbr
42#define dlm ier
43#define dlab lcr
44
45#define LSR_DR 0x01 /* Data ready */
46#define LSR_OE 0x02 /* Overrun */
47#define LSR_PE 0x04 /* Parity error */
48#define LSR_FE 0x08 /* Framing error */
49#define LSR_BI 0x10 /* Break */
50#define LSR_THRE 0x20 /* Xmit holding register empty */
51#define LSR_TEMT 0x40 /* Xmitter empty */
52#define LSR_ERR 0x80 /* Error */
53
54static volatile struct NS16550 __iomem *udbg_comport;
55
56void udbg_init_uart(void __iomem *comport, unsigned int speed)
57{
58 u16 dll = speed ? (115200 / speed) : 12;
59
60 if (comport) {
61 udbg_comport = (struct NS16550 __iomem *)comport;
62 out_8(&udbg_comport->lcr, 0x00);
63 out_8(&udbg_comport->ier, 0xff);
64 out_8(&udbg_comport->ier, 0x00);
65 out_8(&udbg_comport->lcr, 0x80); /* Access baud rate */
66 out_8(&udbg_comport->dll, dll & 0xff); /* 1 = 115200, 2 = 57600,
67 3 = 38400, 12 = 9600 baud */
68 out_8(&udbg_comport->dlm, dll >> 8); /* dll >> 8 which should be zero
69 for fast rates; */
70 out_8(&udbg_comport->lcr, 0x03); /* 8 data, 1 stop, no parity */
71 out_8(&udbg_comport->mcr, 0x03); /* RTS/DTR */
72 out_8(&udbg_comport->fcr ,0x07); /* Clear & enable FIFOs */
73 }
74}
75
76#ifdef CONFIG_PPC_PMAC
77
78#define SCC_TXRDY 4
79#define SCC_RXRDY 1
80
81static volatile u8 __iomem *sccc;
82static volatile u8 __iomem *sccd;
83
84static unsigned char scc_inittab[] = {
85 13, 0, /* set baud rate divisor */
86 12, 0,
87 14, 1, /* baud rate gen enable, src=rtxc */
88 11, 0x50, /* clocks = br gen */
89 5, 0xea, /* tx 8 bits, assert DTR & RTS */
90 4, 0x46, /* x16 clock, 1 stop */
91 3, 0xc1, /* rx enable, 8 bits */
92};
93
94void udbg_init_scc(struct device_node *np)
95{
96 u32 *reg;
97 unsigned long addr;
98 int i, x;
99
100 if (np == NULL)
101 np = of_find_node_by_name(NULL, "escc");
102 if (np == NULL || np->parent == NULL)
103 return;
104
105 udbg_printf("found SCC...\n");
106 /* Get address within mac-io ASIC */
107 reg = (u32 *)get_property(np, "reg", NULL);
108 if (reg == NULL)
109 return;
110 addr = reg[0];
111 udbg_printf("local addr: %lx\n", addr);
112 /* Get address of mac-io PCI itself */
113 reg = (u32 *)get_property(np->parent, "assigned-addresses", NULL);
114 if (reg == NULL)
115 return;
116 addr += reg[2];
117 udbg_printf("final addr: %lx\n", addr);
118
119 /* Setup for 57600 8N1 */
120 addr += 0x20;
121 sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
122 sccc += addr & ~PAGE_MASK;
123 sccd = sccc + 0x10;
124
125 udbg_printf("ioremap result sccc: %p\n", sccc);
126 mb();
127
128 for (i = 20000; i != 0; --i)
129 x = in_8(sccc);
130 out_8(sccc, 0x09); /* reset A or B side */
131 out_8(sccc, 0xc0);
132 for (i = 0; i < sizeof(scc_inittab); ++i)
133 out_8(sccc, scc_inittab[i]);
134
135 ppc_md.udbg_putc = udbg_putc;
136 ppc_md.udbg_getc = udbg_getc;
137 ppc_md.udbg_getc_poll = udbg_getc_poll;
138
139 udbg_puts("Hello World !\n");
140}
141
142#endif /* CONFIG_PPC_PMAC */
143
144#ifdef CONFIG_PPC_PMAC
145static void udbg_real_putc(unsigned char c)
146{
147 while ((real_readb(sccc) & SCC_TXRDY) == 0)
148 ;
149 real_writeb(c, sccd);
150 if (c == '\n')
151 udbg_real_putc('\r');
152}
153
154void udbg_init_pmac_realmode(void)
155{
156 sccc = (volatile u8 __iomem *)0x80013020ul;
157 sccd = (volatile u8 __iomem *)0x80013030ul;
158
159 ppc_md.udbg_putc = udbg_real_putc;
160 ppc_md.udbg_getc = NULL;
161 ppc_md.udbg_getc_poll = NULL;
162}
163#endif /* CONFIG_PPC_PMAC */
164
165#ifdef CONFIG_PPC_MAPLE
166void udbg_maple_real_putc(unsigned char c)
167{
168 if (udbg_comport) {
169 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
170 /* wait for idle */;
171 real_writeb(c, &udbg_comport->thr); eieio();
172 if (c == '\n') {
173 /* Also put a CR. This is for convenience. */
174 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
175 /* wait for idle */;
176 real_writeb('\r', &udbg_comport->thr); eieio();
177 }
178 }
179}
180
181void udbg_init_maple_realmode(void)
182{
183 udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8;
184
185 ppc_md.udbg_putc = udbg_maple_real_putc;
186 ppc_md.udbg_getc = NULL;
187 ppc_md.udbg_getc_poll = NULL;
188}
189#endif /* CONFIG_PPC_MAPLE */
190
191void udbg_putc(unsigned char c)
192{
193 if (udbg_comport) {
194 while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
195 /* wait for idle */;
196 out_8(&udbg_comport->thr, c);
197 if (c == '\n') {
198 /* Also put a CR. This is for convenience. */
199 while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
200 /* wait for idle */;
201 out_8(&udbg_comport->thr, '\r');
202 }
203 }
204#ifdef CONFIG_PPC_PMAC
205 else if (sccc) {
206 while ((in_8(sccc) & SCC_TXRDY) == 0)
207 ;
208 out_8(sccd, c);
209 if (c == '\n')
210 udbg_putc('\r');
211 }
212#endif /* CONFIG_PPC_PMAC */
213}
214
215int udbg_getc_poll(void)
216{
217 if (udbg_comport) {
218 if ((in_8(&udbg_comport->lsr) & LSR_DR) != 0)
219 return in_8(&udbg_comport->rbr);
220 else
221 return -1;
222 }
223#ifdef CONFIG_PPC_PMAC
224 else if (sccc) {
225 if ((in_8(sccc) & SCC_RXRDY) != 0)
226 return in_8(sccd);
227 else
228 return -1;
229 }
230#endif /* CONFIG_PPC_PMAC */
231 return -1;
232}
233
234unsigned char udbg_getc(void)
235{
236 if (udbg_comport) {
237 while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0)
238 /* wait for char */;
239 return in_8(&udbg_comport->rbr);
240 }
241#ifdef CONFIG_PPC_PMAC
242 else if (sccc) {
243 while ((in_8(sccc) & SCC_RXRDY) == 0)
244 ;
245 return in_8(sccd);
246 }
247#endif /* CONFIG_PPC_PMAC */
248 return 0;
249}
250 24
25/* udbg library, used by xmon et al */
251void udbg_puts(const char *s) 26void udbg_puts(const char *s)
252{ 27{
253 if (ppc_md.udbg_putc) { 28 if (udbg_putc) {
254 char c; 29 char c;
255 30
256 if (s && *s != '\0') { 31 if (s && *s != '\0') {
257 while ((c = *s++) != '\0') 32 while ((c = *s++) != '\0')
258 ppc_md.udbg_putc(c); 33 udbg_putc(c);
259 } 34 }
260 } 35 }
261#if 0 36#if 0
@@ -270,12 +45,12 @@ int udbg_write(const char *s, int n)
270 int remain = n; 45 int remain = n;
271 char c; 46 char c;
272 47
273 if (!ppc_md.udbg_putc) 48 if (!udbg_putc)
274 return 0; 49 return 0;
275 50
276 if (s && *s != '\0') { 51 if (s && *s != '\0') {
277 while (((c = *s++) != '\0') && (remain-- > 0)) { 52 while (((c = *s++) != '\0') && (remain-- > 0)) {
278 ppc_md.udbg_putc(c); 53 udbg_putc(c);
279 } 54 }
280 } 55 }
281 56
@@ -287,12 +62,12 @@ int udbg_read(char *buf, int buflen)
287 char c, *p = buf; 62 char c, *p = buf;
288 int i; 63 int i;
289 64
290 if (!ppc_md.udbg_getc) 65 if (!udbg_getc)
291 return 0; 66 return 0;
292 67
293 for (i = 0; i < buflen; ++i) { 68 for (i = 0; i < buflen; ++i) {
294 do { 69 do {
295 c = ppc_md.udbg_getc(); 70 c = udbg_getc();
296 } while (c == 0x11 || c == 0x13); 71 } while (c == 0x11 || c == 0x13);
297 if (c == 0) 72 if (c == 0)
298 break; 73 break;
@@ -302,11 +77,6 @@ int udbg_read(char *buf, int buflen)
302 return i; 77 return i;
303} 78}
304 79
305void udbg_console_write(struct console *con, const char *s, unsigned int n)
306{
307 udbg_write(s, n);
308}
309
310#define UDBG_BUFSIZE 256 80#define UDBG_BUFSIZE 256
311void udbg_printf(const char *fmt, ...) 81void udbg_printf(const char *fmt, ...)
312{ 82{
@@ -319,6 +89,10 @@ void udbg_printf(const char *fmt, ...)
319 va_end(args); 89 va_end(args);
320} 90}
321 91
92/* PPCDBG stuff */
93
94u64 ppc64_debug_switch;
95
322/* Special print used by PPCDBG() macro */ 96/* Special print used by PPCDBG() macro */
323void udbg_ppcdbg(unsigned long debug_flags, const char *fmt, ...) 97void udbg_ppcdbg(unsigned long debug_flags, const char *fmt, ...)
324{ 98{
@@ -358,3 +132,49 @@ unsigned long udbg_ifdebug(unsigned long flags)
358{ 132{
359 return (flags & ppc64_debug_switch); 133 return (flags & ppc64_debug_switch);
360} 134}
135
136/*
137 * Initialize the PPCDBG state. Called before relocation has been enabled.
138 */
139void __init ppcdbg_initialize(void)
140{
141 ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
142 /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
143}
144
145/*
146 * Early boot console based on udbg
147 */
148static void udbg_console_write(struct console *con, const char *s,
149 unsigned int n)
150{
151 udbg_write(s, n);
152}
153
154static struct console udbg_console = {
155 .name = "udbg",
156 .write = udbg_console_write,
157 .flags = CON_PRINTBUFFER,
158 .index = -1,
159};
160
161static int early_console_initialized;
162
163void __init disable_early_printk(void)
164{
165 if (!early_console_initialized)
166 return;
167 unregister_console(&udbg_console);
168 early_console_initialized = 0;
169}
170
171/* called by setup_system */
172void register_early_udbg_console(void)
173{
174 early_console_initialized = 1;
175 register_console(&udbg_console);
176}
177
178#if 0 /* if you want to use this as a regular output console */
179console_initcall(register_udbg_console);
180#endif
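udbg.c is reduced to a transport-agnostic core: the UART and SCC code move to udbg_16550.c and udbg_scc.c below, and each backend plugs itself in by filling the bare udbg_putc/udbg_getc/udbg_getc_poll function pointers instead of going through ppc_md. A self-contained demo of that dispatch pattern, with a stdout "backend" standing in for a serial port:

#include <stdio.h>

/* Global hooks a backend fills in, mirroring udbg_putc/udbg_getc_poll. */
static void (*dbg_putc)(unsigned char c);
static int (*dbg_getc_poll)(void);

/* Core routine: works with whichever backend registered itself. */
static void dbg_puts(const char *s)
{
        if (dbg_putc)
                while (*s)
                        dbg_putc(*s++);
}

/* A trivial "backend" writing to stdout; a serial port in the kernel. */
static void stdout_putc(unsigned char c) { putchar(c); }
static int stdout_getc_poll(void) { return -1; }        /* nothing to read */

static void stdout_backend_init(void)
{
        dbg_putc = stdout_putc;
        dbg_getc_poll = stdout_getc_poll;
}

int main(void)
{
        dbg_puts("silently dropped (no backend yet)\n");
        stdout_backend_init();
        dbg_puts("Hello World !\n");
        return 0;
}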
diff --git a/arch/ppc64/kernel/udbg_16550.c b/arch/ppc64/kernel/udbg_16550.c
new file mode 100644
index 000000000000..9313574ab935
--- /dev/null
+++ b/arch/ppc64/kernel/udbg_16550.c
@@ -0,0 +1,123 @@
1/*
2 * udbg for NS16550 compatible serial ports
3 *
4 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <linux/types.h>
13#include <asm/udbg.h>
14#include <asm/io.h>
15
16extern u8 real_readb(volatile u8 __iomem *addr);
17extern void real_writeb(u8 data, volatile u8 __iomem *addr);
18
19struct NS16550 {
20 /* this struct must be packed */
21 unsigned char rbr; /* 0 */
22 unsigned char ier; /* 1 */
23 unsigned char fcr; /* 2 */
24 unsigned char lcr; /* 3 */
25 unsigned char mcr; /* 4 */
26 unsigned char lsr; /* 5 */
27 unsigned char msr; /* 6 */
28 unsigned char scr; /* 7 */
29};
30
31#define thr rbr
32#define iir fcr
33#define dll rbr
34#define dlm ier
35#define dlab lcr
36
37#define LSR_DR 0x01 /* Data ready */
38#define LSR_OE 0x02 /* Overrun */
39#define LSR_PE 0x04 /* Parity error */
40#define LSR_FE 0x08 /* Framing error */
41#define LSR_BI 0x10 /* Break */
42#define LSR_THRE 0x20 /* Xmit holding register empty */
43#define LSR_TEMT 0x40 /* Xmitter empty */
44#define LSR_ERR 0x80 /* Error */
45
46static volatile struct NS16550 __iomem *udbg_comport;
47
48static void udbg_550_putc(unsigned char c)
49{
50 if (udbg_comport) {
51 while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0)
52 /* wait for idle */;
53 out_8(&udbg_comport->thr, c);
54 if (c == '\n')
55 udbg_550_putc('\r');
56 }
57}
58
59static int udbg_550_getc_poll(void)
60{
61 if (udbg_comport) {
62 if ((in_8(&udbg_comport->lsr) & LSR_DR) != 0)
63 return in_8(&udbg_comport->rbr);
64 else
65 return -1;
66 }
67 return -1;
68}
69
70static unsigned char udbg_550_getc(void)
71{
72 if (udbg_comport) {
73 while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0)
74 /* wait for char */;
75 return in_8(&udbg_comport->rbr);
76 }
77 return 0;
78}
79
80void udbg_init_uart(void __iomem *comport, unsigned int speed)
81{
82 u16 dll = speed ? (115200 / speed) : 12;
83
84 if (comport) {
85 udbg_comport = (struct NS16550 __iomem *)comport;
86 out_8(&udbg_comport->lcr, 0x00);
87 out_8(&udbg_comport->ier, 0xff);
88 out_8(&udbg_comport->ier, 0x00);
89 out_8(&udbg_comport->lcr, 0x80); /* Access baud rate */
90 out_8(&udbg_comport->dll, dll & 0xff); /* 1 = 115200, 2 = 57600,
91 3 = 38400, 12 = 9600 baud */
92 out_8(&udbg_comport->dlm, dll >> 8); /* dll >> 8 which should be zero
93 for fast rates; */
94 out_8(&udbg_comport->lcr, 0x03); /* 8 data, 1 stop, no parity */
95 out_8(&udbg_comport->mcr, 0x03); /* RTS/DTR */
96 out_8(&udbg_comport->fcr ,0x07); /* Clear & enable FIFOs */
97 udbg_putc = udbg_550_putc;
98 udbg_getc = udbg_550_getc;
99 udbg_getc_poll = udbg_550_getc_poll;
100 }
101}
102
103#ifdef CONFIG_PPC_MAPLE
104void udbg_maple_real_putc(unsigned char c)
105{
106 if (udbg_comport) {
107 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
108 /* wait for idle */;
109 real_writeb(c, &udbg_comport->thr); eieio();
110 if (c == '\n')
111 udbg_maple_real_putc('\r');
112 }
113}
114
115void udbg_init_maple_realmode(void)
116{
117 udbg_comport = (volatile struct NS16550 __iomem *)0xf40003f8;
118
119 udbg_putc = udbg_maple_real_putc;
120 udbg_getc = NULL;
121 udbg_getc_poll = NULL;
122}
123#endif /* CONFIG_PPC_MAPLE */
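udbg_init_uart() programs the divisor latch as 115200/speed, falling back to a divisor of 12 (9600 baud) when speed is 0. A quick stand-alone check of those values:

#include <stdio.h>

/*
 * Divisor latch value used by udbg_init_uart() above: the base rate
 * is 115200, and speed 0 means "use 9600 baud" (divisor 12).
 */
static unsigned short uart_divisor(unsigned int speed)
{
        return speed ? (unsigned short)(115200 / speed) : 12;
}

int main(void)
{
        unsigned int rates[] = { 0, 9600, 38400, 57600, 115200 };
        int i;

        for (i = 0; i < 5; i++)
                printf("speed %6u -> divisor %u\n", rates[i], uart_divisor(rates[i]));
        return 0;
}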
diff --git a/arch/ppc64/kernel/udbg_scc.c b/arch/ppc64/kernel/udbg_scc.c
new file mode 100644
index 000000000000..c47fd6c63531
--- /dev/null
+++ b/arch/ppc64/kernel/udbg_scc.c
@@ -0,0 +1,136 @@
1/*
2 * udbg for Zilog SCC ports as found on Apple PowerMacs
3 *
4 * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <linux/types.h>
13#include <asm/udbg.h>
14#include <asm/processor.h>
15#include <asm/naca.h>
16#include <asm/io.h>
17#include <asm/prom.h>
18#include <asm/pmac_feature.h>
19
20extern u8 real_readb(volatile u8 __iomem *addr);
21extern void real_writeb(u8 data, volatile u8 __iomem *addr);
22
23#define SCC_TXRDY 4
24#define SCC_RXRDY 1
25
26static volatile u8 __iomem *sccc;
27static volatile u8 __iomem *sccd;
28
29static void udbg_scc_putc(unsigned char c)
30{
31 if (sccc) {
32 while ((in_8(sccc) & SCC_TXRDY) == 0)
33 ;
34 out_8(sccd, c);
35 if (c == '\n')
36 udbg_scc_putc('\r');
37 }
38}
39
40static int udbg_scc_getc_poll(void)
41{
42 if (sccc) {
43 if ((in_8(sccc) & SCC_RXRDY) != 0)
44 return in_8(sccd);
45 else
46 return -1;
47 }
48 return -1;
49}
50
51static unsigned char udbg_scc_getc(void)
52{
53 if (sccc) {
54 while ((in_8(sccc) & SCC_RXRDY) == 0)
55 ;
56 return in_8(sccd);
57 }
58 return 0;
59}
60
61static unsigned char scc_inittab[] = {
62 13, 0, /* set baud rate divisor */
63 12, 0,
64 14, 1, /* baud rate gen enable, src=rtxc */
65 11, 0x50, /* clocks = br gen */
66 5, 0xea, /* tx 8 bits, assert DTR & RTS */
67 4, 0x46, /* x16 clock, 1 stop */
68 3, 0xc1, /* rx enable, 8 bits */
69};
70
71void udbg_init_scc(struct device_node *np)
72{
73 u32 *reg;
74 unsigned long addr;
75 int i, x;
76
77 if (np == NULL)
78 np = of_find_node_by_name(NULL, "escc");
79 if (np == NULL || np->parent == NULL)
80 return;
81
82 udbg_printf("found SCC...\n");
83 /* Get address within mac-io ASIC */
84 reg = (u32 *)get_property(np, "reg", NULL);
85 if (reg == NULL)
86 return;
87 addr = reg[0];
88 udbg_printf("local addr: %lx\n", addr);
89 /* Get address of mac-io PCI itself */
90 reg = (u32 *)get_property(np->parent, "assigned-addresses", NULL);
91 if (reg == NULL)
92 return;
93 addr += reg[2];
94 udbg_printf("final addr: %lx\n", addr);
95
96 /* Setup for 57600 8N1 */
97 addr += 0x20;
98 sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
99 sccc += addr & ~PAGE_MASK;
100 sccd = sccc + 0x10;
101
102 udbg_printf("ioremap result sccc: %p\n", sccc);
103 mb();
104
105 for (i = 20000; i != 0; --i)
106 x = in_8(sccc);
107 out_8(sccc, 0x09); /* reset A or B side */
108 out_8(sccc, 0xc0);
109 for (i = 0; i < sizeof(scc_inittab); ++i)
110 out_8(sccc, scc_inittab[i]);
111
112 udbg_putc = udbg_scc_putc;
113 udbg_getc = udbg_scc_getc;
114 udbg_getc_poll = udbg_scc_getc_poll;
115
116 udbg_puts("Hello World !\n");
117}
118
119static void udbg_real_scc_putc(unsigned char c)
120{
121 while ((real_readb(sccc) & SCC_TXRDY) == 0)
122 ;
123 real_writeb(c, sccd);
124 if (c == '\n')
125 udbg_real_scc_putc('\r');
126}
127
128void udbg_init_pmac_realmode(void)
129{
130 sccc = (volatile u8 __iomem *)0x80013020ul;
131 sccd = (volatile u8 __iomem *)0x80013030ul;
132
133 udbg_putc = udbg_real_scc_putc;
134 udbg_getc = NULL;
135 udbg_getc_poll = NULL;
136}
diff --git a/arch/ppc64/kernel/vdso32/cacheflush.S b/arch/ppc64/kernel/vdso32/cacheflush.S
index 0ed7ea721715..c8db993574ee 100644
--- a/arch/ppc64/kernel/vdso32/cacheflush.S
+++ b/arch/ppc64/kernel/vdso32/cacheflush.S
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/ppc_asm.h> 14#include <asm/ppc_asm.h>
15#include <asm/vdso.h> 15#include <asm/vdso.h>
16#include <asm/offsets.h> 16#include <asm/asm-offsets.h>
17 17
18 .text 18 .text
19 19
diff --git a/arch/ppc64/kernel/vdso32/datapage.S b/arch/ppc64/kernel/vdso32/datapage.S
index 29b6bd32e1f1..4f4eb0be3992 100644
--- a/arch/ppc64/kernel/vdso32/datapage.S
+++ b/arch/ppc64/kernel/vdso32/datapage.S
@@ -12,7 +12,7 @@
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/ppc_asm.h> 14#include <asm/ppc_asm.h>
15#include <asm/offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18 18
diff --git a/arch/ppc64/kernel/vdso32/gettimeofday.S b/arch/ppc64/kernel/vdso32/gettimeofday.S
index 2b48bf1fb109..07f1c1c650c8 100644
--- a/arch/ppc64/kernel/vdso32/gettimeofday.S
+++ b/arch/ppc64/kernel/vdso32/gettimeofday.S
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/ppc_asm.h> 14#include <asm/ppc_asm.h>
15#include <asm/vdso.h> 15#include <asm/vdso.h>
16#include <asm/offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/unistd.h> 17#include <asm/unistd.h>
18 18
19 .text 19 .text
diff --git a/arch/ppc64/kernel/vdso64/cacheflush.S b/arch/ppc64/kernel/vdso64/cacheflush.S
index e0725b7b7003..d4a0ad28d534 100644
--- a/arch/ppc64/kernel/vdso64/cacheflush.S
+++ b/arch/ppc64/kernel/vdso64/cacheflush.S
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/ppc_asm.h> 14#include <asm/ppc_asm.h>
15#include <asm/vdso.h> 15#include <asm/vdso.h>
16#include <asm/offsets.h> 16#include <asm/asm-offsets.h>
17 17
18 .text 18 .text
19 19
diff --git a/arch/ppc64/kernel/vdso64/datapage.S b/arch/ppc64/kernel/vdso64/datapage.S
index 18afd971c9d9..ed6e599ae824 100644
--- a/arch/ppc64/kernel/vdso64/datapage.S
+++ b/arch/ppc64/kernel/vdso64/datapage.S
@@ -12,7 +12,7 @@
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/ppc_asm.h> 14#include <asm/ppc_asm.h>
15#include <asm/offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18 18
diff --git a/arch/ppc64/kernel/vdso64/gettimeofday.S b/arch/ppc64/kernel/vdso64/gettimeofday.S
index ed3f970ff05e..f6df8028570a 100644
--- a/arch/ppc64/kernel/vdso64/gettimeofday.S
+++ b/arch/ppc64/kernel/vdso64/gettimeofday.S
@@ -14,7 +14,7 @@
14#include <asm/processor.h> 14#include <asm/processor.h>
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/vdso.h> 16#include <asm/vdso.h>
17#include <asm/offsets.h> 17#include <asm/asm-offsets.h>
18 18
19 .text 19 .text
20/* 20/*
diff --git a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c
index 0c0ba71ac0e8..c90e1dd875ce 100644
--- a/arch/ppc64/kernel/vio.c
+++ b/arch/ppc64/kernel/vio.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * IBM PowerPC Virtual I/O Infrastructure Support. 2 * IBM PowerPC Virtual I/O Infrastructure Support.
3 * 3 *
4 * Copyright (c) 2003 IBM Corp. 4 * Copyright (c) 2003-2005 IBM Corp.
5 * Dave Engebretsen engebret@us.ibm.com 5 * Dave Engebretsen engebret@us.ibm.com
6 * Santiago Leon santil@us.ibm.com 6 * Santiago Leon santil@us.ibm.com
7 * Hollis Blanchard <hollisb@us.ibm.com> 7 * Hollis Blanchard <hollisb@us.ibm.com>
8 * Stephen Rothwell
8 * 9 *
9 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
@@ -14,61 +15,30 @@
14 15
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/console.h> 17#include <linux/console.h>
17#include <linux/version.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kobject.h>
20#include <linux/mm.h> 19#include <linux/mm.h>
21#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
22#include <asm/rtas.h>
23#include <asm/iommu.h> 21#include <asm/iommu.h>
24#include <asm/dma.h> 22#include <asm/dma.h>
25#include <asm/ppcdebug.h>
26#include <asm/vio.h> 23#include <asm/vio.h>
27#include <asm/hvcall.h>
28#include <asm/iSeries/vio.h>
29#include <asm/iSeries/HvTypes.h>
30#include <asm/iSeries/HvCallXm.h>
31#include <asm/iSeries/HvLpConfig.h>
32
33#define DBGENTER() pr_debug("%s entered\n", __FUNCTION__)
34
35extern struct subsystem devices_subsys; /* needed for vio_find_name() */
36 24
37static const struct vio_device_id *vio_match_device( 25static const struct vio_device_id *vio_match_device(
38 const struct vio_device_id *, const struct vio_dev *); 26 const struct vio_device_id *, const struct vio_dev *);
39 27
40#ifdef CONFIG_PPC_PSERIES 28struct vio_dev vio_bus_device = { /* fake "parent" device */
41static struct iommu_table *vio_build_iommu_table(struct vio_dev *);
42static int vio_num_address_cells;
43#endif
44#ifdef CONFIG_PPC_ISERIES
45static struct iommu_table veth_iommu_table;
46static struct iommu_table vio_iommu_table;
47#endif
48static struct vio_dev vio_bus_device = { /* fake "parent" device */
49 .name = vio_bus_device.dev.bus_id, 29 .name = vio_bus_device.dev.bus_id,
50 .type = "", 30 .type = "",
51#ifdef CONFIG_PPC_ISERIES
52 .iommu_table = &vio_iommu_table,
53#endif
54 .dev.bus_id = "vio", 31 .dev.bus_id = "vio",
55 .dev.bus = &vio_bus_type, 32 .dev.bus = &vio_bus_type,
56}; 33};
57 34
58#ifdef CONFIG_PPC_ISERIES 35static struct vio_bus_ops vio_bus_ops;
59static struct vio_dev *__init vio_register_device_iseries(char *type,
60 uint32_t unit_num);
61
62struct device *iSeries_vio_dev = &vio_bus_device.dev;
63EXPORT_SYMBOL(iSeries_vio_dev);
64
65#define device_is_compatible(a, b) 1
66 36
67#endif 37/*
68 38 * Convert from struct device to struct vio_dev and pass to driver.
69/* convert from struct device to struct vio_dev and pass to driver.
70 * dev->driver has already been set by generic code because vio_bus_match 39 * dev->driver has already been set by generic code because vio_bus_match
71 * succeeded. */ 40 * succeeded.
41 */
72static int vio_bus_probe(struct device *dev) 42static int vio_bus_probe(struct device *dev)
73{ 43{
74 struct vio_dev *viodev = to_vio_dev(dev); 44 struct vio_dev *viodev = to_vio_dev(dev);
@@ -76,15 +46,12 @@ static int vio_bus_probe(struct device *dev)
76 const struct vio_device_id *id; 46 const struct vio_device_id *id;
77 int error = -ENODEV; 47 int error = -ENODEV;
78 48
79 DBGENTER();
80
81 if (!viodrv->probe) 49 if (!viodrv->probe)
82 return error; 50 return error;
83 51
84 id = vio_match_device(viodrv->id_table, viodev); 52 id = vio_match_device(viodrv->id_table, viodev);
85 if (id) { 53 if (id)
86 error = viodrv->probe(viodev, id); 54 error = viodrv->probe(viodev, id);
87 }
88 55
89 return error; 56 return error;
90} 57}
@@ -95,11 +62,8 @@ static int vio_bus_remove(struct device *dev)
95 struct vio_dev *viodev = to_vio_dev(dev); 62 struct vio_dev *viodev = to_vio_dev(dev);
96 struct vio_driver *viodrv = to_vio_driver(dev->driver); 63 struct vio_driver *viodrv = to_vio_driver(dev->driver);
97 64
98 DBGENTER(); 65 if (viodrv->remove)
99
100 if (viodrv->remove) {
101 return viodrv->remove(viodev); 66 return viodrv->remove(viodev);
102 }
103 67
104 /* driver can't remove */ 68 /* driver can't remove */
105 return 1; 69 return 1;
@@ -135,193 +99,72 @@ void vio_unregister_driver(struct vio_driver *viodrv)
135EXPORT_SYMBOL(vio_unregister_driver); 99EXPORT_SYMBOL(vio_unregister_driver);
136 100
137/** 101/**
138 * vio_match_device: - Tell if a VIO device has a matching VIO device id structure. 102 * vio_match_device: - Tell if a VIO device has a matching
139 * @ids: array of VIO device id structures to search in 103 * VIO device id structure.
140 * @dev: the VIO device structure to match against 104 * @ids: array of VIO device id structures to search in
105 * @dev: the VIO device structure to match against
141 * 106 *
142 * Used by a driver to check whether a VIO device present in the 107 * Used by a driver to check whether a VIO device present in the
143 * system is in its list of supported devices. Returns the matching 108 * system is in its list of supported devices. Returns the matching
144 * vio_device_id structure or NULL if there is no match. 109 * vio_device_id structure or NULL if there is no match.
145 */ 110 */
146static const struct vio_device_id * vio_match_device(const struct vio_device_id *ids, 111static const struct vio_device_id *vio_match_device(
147 const struct vio_dev *dev) 112 const struct vio_device_id *ids, const struct vio_dev *dev)
148{ 113{
149 DBGENTER(); 114 while (ids->type[0] != '\0') {
150 115 if (vio_bus_ops.match(ids, dev))
151 while (ids->type) {
152 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
153 device_is_compatible(dev->dev.platform_data, ids->compat))
154 return ids; 116 return ids;
155 ids++; 117 ids++;
156 } 118 }
157 return NULL; 119 return NULL;
158} 120}
159 121
160#ifdef CONFIG_PPC_ISERIES
161void __init iommu_vio_init(void)
162{
163 struct iommu_table *t;
164 struct iommu_table_cb cb;
165 unsigned long cbp;
166 unsigned long itc_entries;
167
168 cb.itc_busno = 255; /* Bus 255 is the virtual bus */
169 cb.itc_virtbus = 0xff; /* Ask for virtual bus */
170
171 cbp = virt_to_abs(&cb);
172 HvCallXm_getTceTableParms(cbp);
173
174 itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
175 veth_iommu_table.it_size = itc_entries / 2;
176 veth_iommu_table.it_busno = cb.itc_busno;
177 veth_iommu_table.it_offset = cb.itc_offset;
178 veth_iommu_table.it_index = cb.itc_index;
179 veth_iommu_table.it_type = TCE_VB;
180 veth_iommu_table.it_blocksize = 1;
181
182 t = iommu_init_table(&veth_iommu_table);
183
184 if (!t)
185 printk("Virtual Bus VETH TCE table failed.\n");
186
187 vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
188 vio_iommu_table.it_busno = cb.itc_busno;
189 vio_iommu_table.it_offset = cb.itc_offset +
190 veth_iommu_table.it_size;
191 vio_iommu_table.it_index = cb.itc_index;
192 vio_iommu_table.it_type = TCE_VB;
193 vio_iommu_table.it_blocksize = 1;
194
195 t = iommu_init_table(&vio_iommu_table);
196
197 if (!t)
198 printk("Virtual Bus VIO TCE table failed.\n");
199}
200#endif
201
202#ifdef CONFIG_PPC_PSERIES
203static void probe_bus_pseries(void)
204{
205 struct device_node *node_vroot, *of_node;
206
207 node_vroot = find_devices("vdevice");
208 if ((node_vroot == NULL) || (node_vroot->child == NULL))
209 /* this machine doesn't do virtual IO, and that's ok */
210 return;
211
212 vio_num_address_cells = prom_n_addr_cells(node_vroot->child);
213
214 /*
215 * Create struct vio_devices for each virtual device in the device tree.
216 * Drivers will associate with them later.
217 */
218 for (of_node = node_vroot->child; of_node != NULL;
219 of_node = of_node->sibling) {
220 printk(KERN_DEBUG "%s: processing %p\n", __FUNCTION__, of_node);
221 vio_register_device_node(of_node);
222 }
223}
224#endif
225
226#ifdef CONFIG_PPC_ISERIES
227static void probe_bus_iseries(void)
228{
229 HvLpIndexMap vlan_map = HvLpConfig_getVirtualLanIndexMap();
230 struct vio_dev *viodev;
231 int i;
232
233 /* there is only one of each of these */
234 vio_register_device_iseries("viocons", 0);
235 vio_register_device_iseries("vscsi", 0);
236
237 vlan_map = HvLpConfig_getVirtualLanIndexMap();
238 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
239 if ((vlan_map & (0x8000 >> i)) == 0)
240 continue;
241 viodev = vio_register_device_iseries("vlan", i);
242 /* veth is special and has it own iommu_table */
243 viodev->iommu_table = &veth_iommu_table;
244 }
245 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
246 vio_register_device_iseries("viodasd", i);
247 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
248 vio_register_device_iseries("viocd", i);
249 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
250 vio_register_device_iseries("viotape", i);
251}
252#endif
253
254/** 122/**
255 * vio_bus_init: - Initialize the virtual IO bus 123 * vio_bus_init: - Initialize the virtual IO bus
256 */ 124 */
257static int __init vio_bus_init(void) 125int __init vio_bus_init(struct vio_bus_ops *ops)
258{ 126{
259 int err; 127 int err;
260 128
129 vio_bus_ops = *ops;
130
261 err = bus_register(&vio_bus_type); 131 err = bus_register(&vio_bus_type);
262 if (err) { 132 if (err) {
263 printk(KERN_ERR "failed to register VIO bus\n"); 133 printk(KERN_ERR "failed to register VIO bus\n");
264 return err; 134 return err;
265 } 135 }
266 136
267 /* the fake parent of all vio devices, just to give us a nice directory */ 137 /*
138 * The fake parent of all vio devices, just to give us
139 * a nice directory
140 */
268 err = device_register(&vio_bus_device.dev); 141 err = device_register(&vio_bus_device.dev);
269 if (err) { 142 if (err) {
270 printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, 143 printk(KERN_WARNING "%s: device_register returned %i\n",
271 err); 144 __FUNCTION__, err);
272 return err; 145 return err;
273 } 146 }
274 147
275#ifdef CONFIG_PPC_PSERIES
276 probe_bus_pseries();
277#endif
278#ifdef CONFIG_PPC_ISERIES
279 probe_bus_iseries();
280#endif
281
282 return 0; 148 return 0;
283} 149}
284 150
285__initcall(vio_bus_init);
286
287/* vio_dev refcount hit 0 */ 151/* vio_dev refcount hit 0 */
288static void __devinit vio_dev_release(struct device *dev) 152static void __devinit vio_dev_release(struct device *dev)
289{ 153{
290 DBGENTER(); 154 if (vio_bus_ops.release_device)
291 155 vio_bus_ops.release_device(dev);
292#ifdef CONFIG_PPC_PSERIES
293 /* XXX free TCE table */
294 of_node_put(dev->platform_data);
295#endif
296 kfree(to_vio_dev(dev)); 156 kfree(to_vio_dev(dev));
297} 157}
298 158
299#ifdef CONFIG_PPC_PSERIES 159static ssize_t viodev_show_name(struct device *dev,
300static ssize_t viodev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) 160 struct device_attribute *attr, char *buf)
301{
302 struct device_node *of_node = dev->platform_data;
303
304 return sprintf(buf, "%s\n", of_node->full_name);
305}
306DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL);
307#endif
308
309static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf)
310{ 161{
311 return sprintf(buf, "%s\n", to_vio_dev(dev)->name); 162 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
312} 163}
313DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL); 164DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_name, NULL);
314 165
315static struct vio_dev * __devinit vio_register_device_common( 166struct vio_dev * __devinit vio_register_device(struct vio_dev *viodev)
316 struct vio_dev *viodev, char *name, char *type,
317 uint32_t unit_address, struct iommu_table *iommu_table)
318{ 167{
319 DBGENTER();
320
321 viodev->name = name;
322 viodev->type = type;
323 viodev->unit_address = unit_address;
324 viodev->iommu_table = iommu_table;
325 /* init generic 'struct device' fields: */ 168 /* init generic 'struct device' fields: */
326 viodev->dev.parent = &vio_bus_device.dev; 169 viodev->dev.parent = &vio_bus_device.dev;
327 viodev->dev.bus = &vio_bus_type; 170 viodev->dev.bus = &vio_bus_type;
@@ -338,222 +181,15 @@ static struct vio_dev * __devinit vio_register_device_common(
338 return viodev; 181 return viodev;
339} 182}
340 183
341#ifdef CONFIG_PPC_PSERIES
342/**
343 * vio_register_device_node: - Register a new vio device.
344 * @of_node: The OF node for this device.
345 *
346 * Creates and initializes a vio_dev structure from the data in
347 * of_node (dev.platform_data) and adds it to the list of virtual devices.
348 * Returns a pointer to the created vio_dev or NULL if node has
349 * NULL device_type or compatible fields.
350 */
351struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
352{
353 struct vio_dev *viodev;
354 unsigned int *unit_address;
355 unsigned int *irq_p;
356
357 DBGENTER();
358
359 /* we need the 'device_type' property, in order to match with drivers */
360 if ((NULL == of_node->type)) {
361 printk(KERN_WARNING
362 "%s: node %s missing 'device_type'\n", __FUNCTION__,
363 of_node->name ? of_node->name : "<unknown>");
364 return NULL;
365 }
366
367 unit_address = (unsigned int *)get_property(of_node, "reg", NULL);
368 if (!unit_address) {
369 printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__,
370 of_node->name ? of_node->name : "<unknown>");
371 return NULL;
372 }
373
374 /* allocate a vio_dev for this node */
375 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
376 if (!viodev) {
377 return NULL;
378 }
379 memset(viodev, 0, sizeof(struct vio_dev));
380
381 viodev->dev.platform_data = of_node_get(of_node);
382
383 viodev->irq = NO_IRQ;
384 irq_p = (unsigned int *)get_property(of_node, "interrupts", NULL);
385 if (irq_p) {
386 int virq = virt_irq_create_mapping(*irq_p);
387 if (virq == NO_IRQ) {
388 printk(KERN_ERR "Unable to allocate interrupt "
389 "number for %s\n", of_node->full_name);
390 } else
391 viodev->irq = irq_offset_up(virq);
392 }
393
394 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
395
396 /* register with generic device framework */
397 if (vio_register_device_common(viodev, of_node->name, of_node->type,
398 *unit_address, vio_build_iommu_table(viodev))
399 == NULL) {
400 /* XXX free TCE table */
401 kfree(viodev);
402 return NULL;
403 }
404 device_create_file(&viodev->dev, &dev_attr_devspec);
405
406 return viodev;
407}
408EXPORT_SYMBOL(vio_register_device_node);
409#endif
410
411#ifdef CONFIG_PPC_ISERIES
412/**
413 * vio_register_device: - Register a new vio device.
414 * @voidev: The device to register.
415 */
416static struct vio_dev *__init vio_register_device_iseries(char *type,
417 uint32_t unit_num)
418{
419 struct vio_dev *viodev;
420
421 DBGENTER();
422
423 /* allocate a vio_dev for this node */
424 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
425 if (!viodev)
426 return NULL;
427 memset(viodev, 0, sizeof(struct vio_dev));
428
429 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
430
431 return vio_register_device_common(viodev, viodev->dev.bus_id, type,
432 unit_num, &vio_iommu_table);
433}
434#endif
435
436void __devinit vio_unregister_device(struct vio_dev *viodev) 184void __devinit vio_unregister_device(struct vio_dev *viodev)
437{ 185{
438 DBGENTER(); 186 if (vio_bus_ops.unregister_device)
439#ifdef CONFIG_PPC_PSERIES 187 vio_bus_ops.unregister_device(viodev);
440 device_remove_file(&viodev->dev, &dev_attr_devspec);
441#endif
442 device_remove_file(&viodev->dev, &dev_attr_name); 188 device_remove_file(&viodev->dev, &dev_attr_name);
443 device_unregister(&viodev->dev); 189 device_unregister(&viodev->dev);
444} 190}
445EXPORT_SYMBOL(vio_unregister_device); 191EXPORT_SYMBOL(vio_unregister_device);
446 192
447#ifdef CONFIG_PPC_PSERIES
448/**
449 * vio_get_attribute: - get attribute for virtual device
450 * @vdev: The vio device to get property.
451 * @which: The property/attribute to be extracted.
452 * @length: Pointer to length of returned data size (unused if NULL).
453 *
454 * Calls prom.c's get_property() to return the value of the
455 * attribute specified by the preprocessor constant @which
456*/
457const void * vio_get_attribute(struct vio_dev *vdev, void* which, int* length)
458{
459 return get_property(vdev->dev.platform_data, (char*)which, length);
460}
461EXPORT_SYMBOL(vio_get_attribute);
462
463/* vio_find_name() - internal because only vio.c knows how we formatted the
464 * kobject name
465 * XXX once vio_bus_type.devices is actually used as a kset in
466 * drivers/base/bus.c, this function should be removed in favor of
467 * "device_find(kobj_name, &vio_bus_type)"
468 */
469static struct vio_dev *vio_find_name(const char *kobj_name)
470{
471 struct kobject *found;
472
473 found = kset_find_obj(&devices_subsys.kset, kobj_name);
474 if (!found)
475 return NULL;
476
477 return to_vio_dev(container_of(found, struct device, kobj));
478}
479
480/**
481 * vio_find_node - find an already-registered vio_dev
482 * @vnode: device_node of the virtual device we're looking for
483 */
484struct vio_dev *vio_find_node(struct device_node *vnode)
485{
486 uint32_t *unit_address;
487 char kobj_name[BUS_ID_SIZE];
488
489 /* construct the kobject name from the device node */
490 unit_address = (uint32_t *)get_property(vnode, "reg", NULL);
491 if (!unit_address)
492 return NULL;
493 snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
494
495 return vio_find_name(kobj_name);
496}
497EXPORT_SYMBOL(vio_find_node);
498
499/**
500 * vio_build_iommu_table: - gets the dma information from OF and builds the TCE tree.
501 * @dev: the virtual device.
502 *
503 * Returns a pointer to the built tce tree, or NULL if it can't
504 * find property.
505*/
506static struct iommu_table * vio_build_iommu_table(struct vio_dev *dev)
507{
508 unsigned int *dma_window;
509 struct iommu_table *newTceTable;
510 unsigned long offset;
511 int dma_window_property_size;
512
513 dma_window = (unsigned int *) get_property(dev->dev.platform_data, "ibm,my-dma-window", &dma_window_property_size);
514 if(!dma_window) {
515 return NULL;
516 }
517
518 newTceTable = (struct iommu_table *) kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
519
520 /* There should be some code to extract the phys-encoded offset
521 using prom_n_addr_cells(). However, according to a comment
522 on earlier versions, it's always zero, so we don't bother */
523 offset = dma_window[1] >> PAGE_SHIFT;
524
525 /* TCE table size - measured in tce entries */
526 newTceTable->it_size = dma_window[4] >> PAGE_SHIFT;
527 /* offset for VIO should always be 0 */
528 newTceTable->it_offset = offset;
529 newTceTable->it_busno = 0;
530 newTceTable->it_index = (unsigned long)dma_window[0];
531 newTceTable->it_type = TCE_VB;
532
533 return iommu_init_table(newTceTable);
534}
535
536int vio_enable_interrupts(struct vio_dev *dev)
537{
538 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
539 if (rc != H_Success) {
540 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
541 }
542 return rc;
543}
544EXPORT_SYMBOL(vio_enable_interrupts);
545
546int vio_disable_interrupts(struct vio_dev *dev)
547{
548 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
549 if (rc != H_Success) {
550 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
551 }
552 return rc;
553}
554EXPORT_SYMBOL(vio_disable_interrupts);
555#endif
556
557static dma_addr_t vio_map_single(struct device *dev, void *vaddr, 193static dma_addr_t vio_map_single(struct device *dev, void *vaddr,
558 size_t size, enum dma_data_direction direction) 194 size_t size, enum dma_data_direction direction)
559{ 195{
@@ -615,18 +251,8 @@ static int vio_bus_match(struct device *dev, struct device_driver *drv)
615 const struct vio_dev *vio_dev = to_vio_dev(dev); 251 const struct vio_dev *vio_dev = to_vio_dev(dev);
616 struct vio_driver *vio_drv = to_vio_driver(drv); 252 struct vio_driver *vio_drv = to_vio_driver(drv);
617 const struct vio_device_id *ids = vio_drv->id_table; 253 const struct vio_device_id *ids = vio_drv->id_table;
618 const struct vio_device_id *found_id;
619
620 DBGENTER();
621 254
622 if (!ids) 255 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
623 return 0;
624
625 found_id = vio_match_device(ids, vio_dev);
626 if (found_id)
627 return 1;
628
629 return 0;
630} 256}
631 257
632struct bus_type vio_bus_type = { 258struct bus_type vio_bus_type = {
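Taken together, the vio.c hunks above turn the file into platform-neutral bus glue: the pSeries device-tree walk, the iSeries hypervisor enumeration, the TCE-table construction and the devspec attribute all move out, and the platform instead hands a struct vio_bus_ops to vio_bus_init() and registers its devices through vio_register_device(). A minimal sketch of how a platform might now plug in; the ops-struct layout is inferred from the two callbacks exercised above (release_device, unregister_device) and may omit members the real header defines:

	#include <linux/device.h>
	#include <linux/init.h>

	struct vio_dev;			/* real definition lives in <asm/vio.h> */

	/* Assumed shape: only the members visible in the hunks above. */
	struct vio_bus_ops {
		void (*release_device)(struct device *dev);
		void (*unregister_device)(struct vio_dev *viodev);
	};

	extern int vio_bus_init(struct vio_bus_ops *ops);
	extern struct vio_dev *vio_register_device(struct vio_dev *viodev);

	static void my_release_device(struct device *dev)
	{
		/* platform teardown: drop firmware-node refs, free the TCE table, ... */
	}

	static void my_unregister_device(struct vio_dev *viodev)
	{
		/* remove platform-specific sysfs attributes before the core does its part */
	}

	static struct vio_bus_ops my_vio_ops = {
		.release_device    = my_release_device,
		.unregister_device = my_unregister_device,
	};

	static int __init my_platform_vio_init(void)
	{
		int err = vio_bus_init(&my_vio_ops);

		if (err)
			return err;

		/* Then enumerate devices the platform's own way, fill in a
		 * struct vio_dev (name, type, unit_address, iommu_table,
		 * dev.bus_id) and hand each one to vio_register_device(). */
		return 0;
	}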
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 4103cc13f8d6..0306510bc4ff 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -15,6 +15,7 @@ SECTIONS
15 *(.text .text.*) 15 *(.text .text.*)
16 SCHED_TEXT 16 SCHED_TEXT
17 LOCK_TEXT 17 LOCK_TEXT
18 KPROBES_TEXT
18 *(.fixup) 19 *(.fixup)
19 . = ALIGN(4096); 20 . = ALIGN(4096);
20 _etext = .; 21 _etext = .;
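The linker-script hunk adds KPROBES_TEXT so that functions annotated __kprobes (such as do_page_fault further below) are grouped into a dedicated .kprobes.text region inside the kernel text, between start/end symbols the macro emits, which the kprobes core can use to refuse probes on that code. Roughly, the annotation is just a section attribute; a sketch with the attribute spelled out rather than taken from <linux/kprobes.h>:

	/* Sketch: what the __kprobes annotation amounts to. */
	#define __kprobes_sketch  __attribute__((__section__(".kprobes.text")))

	int __kprobes_sketch fault_path_helper(void *regs, unsigned long addr)
	{
		/* code here must never itself be probed, since it runs while a
		 * probe or fault is already being handled */
		return 0;
	}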
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
index d9dc6f28d050..daf93885dcfa 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/ppc64/kernel/xics.c
@@ -38,7 +38,7 @@ static void xics_mask_and_ack_irq(unsigned int irq);
38static void xics_end_irq(unsigned int irq); 38static void xics_end_irq(unsigned int irq);
39static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask); 39static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
40 40
41struct hw_interrupt_type xics_pic = { 41static struct hw_interrupt_type xics_pic = {
42 .typename = " XICS ", 42 .typename = " XICS ",
43 .startup = xics_startup, 43 .startup = xics_startup,
44 .enable = xics_enable_irq, 44 .enable = xics_enable_irq,
@@ -48,7 +48,7 @@ struct hw_interrupt_type xics_pic = {
48 .set_affinity = xics_set_affinity 48 .set_affinity = xics_set_affinity
49}; 49};
50 50
51struct hw_interrupt_type xics_8259_pic = { 51static struct hw_interrupt_type xics_8259_pic = {
52 .typename = " XICS/8259", 52 .typename = " XICS/8259",
53 .ack = xics_mask_and_ack_irq, 53 .ack = xics_mask_and_ack_irq,
54}; 54};
@@ -89,9 +89,8 @@ static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
89static int xics_irq_8259_cascade = 0; 89static int xics_irq_8259_cascade = 0;
90static int xics_irq_8259_cascade_real = 0; 90static int xics_irq_8259_cascade_real = 0;
91static unsigned int default_server = 0xFF; 91static unsigned int default_server = 0xFF;
92/* also referenced in smp.c... */ 92static unsigned int default_distrib_server = 0;
93unsigned int default_distrib_server = 0; 93static unsigned int interrupt_server_size = 8;
94unsigned int interrupt_server_size = 8;
95 94
96/* 95/*
97 * XICS only has a single IPI, so encode the messages per CPU 96 * XICS only has a single IPI, so encode the messages per CPU
@@ -99,10 +98,10 @@ unsigned int interrupt_server_size = 8;
99struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned; 98struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
100 99
101/* RTAS service tokens */ 100/* RTAS service tokens */
102int ibm_get_xive; 101static int ibm_get_xive;
103int ibm_set_xive; 102static int ibm_set_xive;
104int ibm_int_on; 103static int ibm_int_on;
105int ibm_int_off; 104static int ibm_int_off;
106 105
107typedef struct { 106typedef struct {
108 int (*xirr_info_get)(int cpu); 107 int (*xirr_info_get)(int cpu);
@@ -284,16 +283,17 @@ static void xics_enable_irq(unsigned int virq)
284 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 283 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
285 DEFAULT_PRIORITY); 284 DEFAULT_PRIORITY);
286 if (call_status != 0) { 285 if (call_status != 0) {
287 printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive " 286 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_set_xive "
288 "returned %x\n", irq, call_status); 287 "returned %d\n", irq, call_status);
288 printk("set_xive %x, server %x\n", ibm_set_xive, server);
289 return; 289 return;
290 } 290 }
291 291
292 /* Now unmask the interrupt (often a no-op) */ 292 /* Now unmask the interrupt (often a no-op) */
293 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); 293 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
294 if (call_status != 0) { 294 if (call_status != 0) {
295 printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on " 295 printk(KERN_ERR "xics_enable_irq: irq=%u: ibm_int_on "
296 "returned %x\n", irq, call_status); 296 "returned %d\n", irq, call_status);
297 return; 297 return;
298 } 298 }
299} 299}
@@ -308,8 +308,8 @@ static void xics_disable_real_irq(unsigned int irq)
308 308
309 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); 309 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
310 if (call_status != 0) { 310 if (call_status != 0) {
311 printk(KERN_ERR "xics_disable_real_irq: irq=%d: " 311 printk(KERN_ERR "xics_disable_real_irq: irq=%u: "
312 "ibm_int_off returned %x\n", irq, call_status); 312 "ibm_int_off returned %d\n", irq, call_status);
313 return; 313 return;
314 } 314 }
315 315
@@ -317,8 +317,8 @@ static void xics_disable_real_irq(unsigned int irq)
317 /* Have to set XIVE to 0xff to be able to remove a slot */ 317 /* Have to set XIVE to 0xff to be able to remove a slot */
318 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff); 318 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
319 if (call_status != 0) { 319 if (call_status != 0) {
320 printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)" 320 printk(KERN_ERR "xics_disable_irq: irq=%u: ibm_set_xive(0xff)"
321 " returned %x\n", irq, call_status); 321 " returned %d\n", irq, call_status);
322 return; 322 return;
323 } 323 }
324} 324}
@@ -380,7 +380,7 @@ int xics_get_irq(struct pt_regs *regs)
380 if (irq == NO_IRQ) 380 if (irq == NO_IRQ)
381 irq = real_irq_to_virt_slowpath(vec); 381 irq = real_irq_to_virt_slowpath(vec);
382 if (irq == NO_IRQ) { 382 if (irq == NO_IRQ) {
383 printk(KERN_ERR "Interrupt %d (real) is invalid," 383 printk(KERN_ERR "Interrupt %u (real) is invalid,"
384 " disabling it.\n", vec); 384 " disabling it.\n", vec);
385 xics_disable_real_irq(vec); 385 xics_disable_real_irq(vec);
386 } else 386 } else
@@ -622,7 +622,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
622 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); 622 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
623 623
624 if (status) { 624 if (status) {
625 printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive " 625 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,get-xive "
626 "returns %d\n", irq, status); 626 "returns %d\n", irq, status);
627 return; 627 return;
628 } 628 }
@@ -641,7 +641,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
641 irq, newmask, xics_status[1]); 641 irq, newmask, xics_status[1]);
642 642
643 if (status) { 643 if (status) {
644 printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive " 644 printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
645 "returns %d\n", irq, status); 645 "returns %d\n", irq, status);
646 return; 646 return;
647 } 647 }
@@ -720,7 +720,7 @@ void xics_migrate_irqs_away(void)
720 720
721 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); 721 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
722 if (status) { 722 if (status) {
723 printk(KERN_ERR "migrate_irqs_away: irq=%d " 723 printk(KERN_ERR "migrate_irqs_away: irq=%u "
724 "ibm,get-xive returns %d\n", 724 "ibm,get-xive returns %d\n",
725 virq, status); 725 virq, status);
726 goto unlock; 726 goto unlock;
@@ -734,7 +734,7 @@ void xics_migrate_irqs_away(void)
734 if (xics_status[0] != get_hard_smp_processor_id(cpu)) 734 if (xics_status[0] != get_hard_smp_processor_id(cpu))
735 goto unlock; 735 goto unlock;
736 736
737 printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n", 737 printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n",
738 virq, cpu); 738 virq, cpu);
739 739
740 /* Reset affinity to all cpus */ 740 /* Reset affinity to all cpus */
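Most of the xics.c changes are mechanical: file-local tables and RTAS tokens become static, and the printk formats are corrected so the unsigned irq numbers use %u while the signed RTAS status keeps %d. A small user-space illustration of why the specifier has to match the type:

	#include <stdio.h>

	int main(void)
	{
		unsigned int irq = 3000000000u;	/* a value above INT_MAX */
		int status = -3;		/* e.g. an RTAS error status */

		/* mismatched: formally undefined, in practice prints a negative irq */
		printf("irq=%d status=%d\n", irq, status);
		/* matched, as in the patched printks */
		printf("irq=%u status=%d\n", irq, status);
		return 0;
	}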
diff --git a/arch/ppc64/lib/dec_and_lock.c b/arch/ppc64/lib/dec_and_lock.c
index 6e8d8591708c..7b9d4da5cf92 100644
--- a/arch/ppc64/lib/dec_and_lock.c
+++ b/arch/ppc64/lib/dec_and_lock.c
@@ -20,14 +20,7 @@
20 * has a cmpxchg, and where atomic->value is an int holding 20 * has a cmpxchg, and where atomic->value is an int holding
21 * the value of the atomic (i.e. the high bits aren't used 21 * the value of the atomic (i.e. the high bits aren't used
22 * for a lock or anything like that). 22 * for a lock or anything like that).
23 *
24 * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
25 * if spinlocks are empty and thus atomic_dec_and_lock is defined
26 * to be atomic_dec_and_test - in that case we don't need it
27 * defined here as well.
28 */ 23 */
29
30#ifndef ATOMIC_DEC_AND_LOCK
31int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) 24int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
32{ 25{
33 int counter; 26 int counter;
@@ -52,4 +45,3 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
52} 45}
53 46
54EXPORT_SYMBOL(_atomic_dec_and_lock); 47EXPORT_SYMBOL(_atomic_dec_and_lock);
55#endif /* ATOMIC_DEC_AND_LOCK */
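With the #ifndef ATOMIC_DEC_AND_LOCK guard gone, this file always supplies _atomic_dec_and_lock(). Per the comment kept above, the idea is a cmpxchg fast path: decrement without taking the lock unless the count would reach zero, and only then fall back to lock-then-decrement. A sketch of that shape (not necessarily the exact body of the ppc64 version):

	#include <linux/spinlock.h>
	#include <asm/atomic.h>

	int dec_and_lock_sketch(atomic_t *atomic, spinlock_t *lock)
	{
		int counter, newcount;

		for (;;) {
			counter = atomic_read(atomic);
			newcount = counter - 1;
			if (!newcount)
				break;		/* would hit zero: need the lock */

			/* publish the decrement locklessly if nobody raced us */
			if (cmpxchg(&atomic->counter, counter, newcount) == counter)
				return 0;	/* decremented, lock not taken */
		}

		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;		/* reached zero: return with lock held */
		spin_unlock(lock);
		return 0;
	}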
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c
index ef70ef91abe2..033643ab69e0 100644
--- a/arch/ppc64/lib/locks.c
+++ b/arch/ppc64/lib/locks.c
@@ -23,12 +23,12 @@
23/* waiting for a spinlock... */ 23/* waiting for a spinlock... */
24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
25 25
26void __spin_yield(spinlock_t *lock) 26void __spin_yield(raw_spinlock_t *lock)
27{ 27{
28 unsigned int lock_value, holder_cpu, yield_count; 28 unsigned int lock_value, holder_cpu, yield_count;
29 struct paca_struct *holder_paca; 29 struct paca_struct *holder_paca;
30 30
31 lock_value = lock->lock; 31 lock_value = lock->slock;
32 if (lock_value == 0) 32 if (lock_value == 0)
33 return; 33 return;
34 holder_cpu = lock_value & 0xffff; 34 holder_cpu = lock_value & 0xffff;
@@ -38,7 +38,7 @@ void __spin_yield(spinlock_t *lock)
38 if ((yield_count & 1) == 0) 38 if ((yield_count & 1) == 0)
39 return; /* virtual cpu is currently running */ 39 return; /* virtual cpu is currently running */
40 rmb(); 40 rmb();
41 if (lock->lock != lock_value) 41 if (lock->slock != lock_value)
42 return; /* something has changed */ 42 return; /* something has changed */
43#ifdef CONFIG_PPC_ISERIES 43#ifdef CONFIG_PPC_ISERIES
44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
@@ -54,7 +54,7 @@ void __spin_yield(spinlock_t *lock)
54 * This turns out to be the same for read and write locks, since 54 * This turns out to be the same for read and write locks, since
55 * we only know the holder if it is write-locked. 55 * we only know the holder if it is write-locked.
56 */ 56 */
57void __rw_yield(rwlock_t *rw) 57void __rw_yield(raw_rwlock_t *rw)
58{ 58{
59 int lock_value; 59 int lock_value;
60 unsigned int holder_cpu, yield_count; 60 unsigned int holder_cpu, yield_count;
@@ -82,9 +82,9 @@ void __rw_yield(rwlock_t *rw)
82} 82}
83#endif 83#endif
84 84
85void spin_unlock_wait(spinlock_t *lock) 85void __raw_spin_unlock_wait(raw_spinlock_t *lock)
86{ 86{
87 while (lock->lock) { 87 while (lock->slock) {
88 HMT_low(); 88 HMT_low();
89 if (SHARED_PROCESSOR) 89 if (SHARED_PROCESSOR)
90 __spin_yield(lock); 90 __spin_yield(lock);
@@ -92,4 +92,4 @@ void spin_unlock_wait(spinlock_t *lock)
92 HMT_medium(); 92 HMT_medium();
93} 93}
94 94
95EXPORT_SYMBOL(spin_unlock_wait); 95EXPORT_SYMBOL(__raw_spin_unlock_wait);
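The locks.c hunks retarget the shared-processor helpers at the new raw_spinlock_t/raw_rwlock_t types, whose lock word is now named slock. The logic itself is unchanged: a held lock encodes the holder's cpu in the low 16 bits, and an odd yield count for that cpu means its virtual processor is preempted, so it is worth asking the hypervisor to run the holder instead of spinning. A condensed sketch of that decision; the two helpers at the top are stand-ins for the paca/lppaca lookup and the hypervisor yield call, not real interfaces:

	#include <linux/spinlock.h>

	/* Hypothetical stand-ins for the lppaca read and the directed yield. */
	static unsigned int holder_yield_count(unsigned int cpu);
	static void yield_to_cpu(unsigned int cpu, unsigned int yield_count);

	static void spin_yield_sketch(raw_spinlock_t *lock)
	{
		unsigned int lock_value = lock->slock;
		unsigned int holder_cpu, yield_count;

		if (lock_value == 0)
			return;				/* already free */

		holder_cpu = lock_value & 0xffff;	/* holder cpu in the low bits */
		yield_count = holder_yield_count(holder_cpu);
		if ((yield_count & 1) == 0)
			return;				/* holder is running right now */

		rmb();
		if (lock->slock != lock_value)
			return;				/* lock changed hands meanwhile */

		yield_to_cpu(holder_cpu, yield_count);	/* ask the hypervisor to run it */
	}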
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf8..7fbc68bbb739 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/kprobes.h>
32 33
33#include <asm/page.h> 34#include <asm/page.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
@@ -76,6 +77,28 @@ static int store_updates_sp(struct pt_regs *regs)
76 return 0; 77 return 0;
77} 78}
78 79
80static void do_dabr(struct pt_regs *regs, unsigned long error_code)
81{
82 siginfo_t info;
83
84 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
85 11, SIGSEGV) == NOTIFY_STOP)
86 return;
87
88 if (debugger_dabr_match(regs))
89 return;
90
91 /* Clear the DABR */
92 set_dabr(0);
93
94 /* Deliver the signal to userspace */
95 info.si_signo = SIGTRAP;
96 info.si_errno = 0;
97 info.si_code = TRAP_HWBKPT;
98 info.si_addr = (void __user *)regs->nip;
99 force_sig_info(SIGTRAP, &info, current);
100}
101
79/* 102/*
80 * The error_code parameter is 103 * The error_code parameter is
81 * - DSISR for a non-SLB data access fault, 104 * - DSISR for a non-SLB data access fault,
@@ -84,8 +107,8 @@ static int store_updates_sp(struct pt_regs *regs)
84 * The return value is 0 if the fault was handled, or the signal 107 * The return value is 0 if the fault was handled, or the signal
85 * number if this is a kernel fault that can't be handled here. 108 * number if this is a kernel fault that can't be handled here.
86 */ 109 */
87int do_page_fault(struct pt_regs *regs, unsigned long address, 110int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
88 unsigned long error_code) 111 unsigned long error_code)
89{ 112{
90 struct vm_area_struct * vma; 113 struct vm_area_struct * vma;
91 struct mm_struct *mm = current->mm; 114 struct mm_struct *mm = current->mm;
@@ -110,12 +133,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
110 if (!user_mode(regs) && (address >= TASK_SIZE)) 133 if (!user_mode(regs) && (address >= TASK_SIZE))
111 return SIGSEGV; 134 return SIGSEGV;
112 135
113 if (error_code & DSISR_DABRMATCH) { 136 if (error_code & DSISR_DABRMATCH) {
114 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 137 do_dabr(regs, error_code);
115 11, SIGSEGV) == NOTIFY_STOP) 138 return 0;
116 return 0;
117 if (debugger_dabr_match(regs))
118 return 0;
119 } 139 }
120 140
121 if (in_atomic() || mm == NULL) { 141 if (in_atomic() || mm == NULL) {
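The new do_dabr() helper turns a DABR (data address breakpoint) match into a SIGTRAP with si_code TRAP_HWBKPT delivered to the faulting task, instead of only notifying in-kernel debuggers, and do_page_fault itself is marked __kprobes so the fault path cannot be probed recursively. On the receiving end such a watchpoint shows up as an ordinary queued signal; a small runnable user-space sketch of a handler (arming the DABR, typically via ptrace, is not shown):

	#define _GNU_SOURCE
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void trap_handler(int sig, siginfo_t *info, void *ctx)
	{
		if (info->si_code == TRAP_HWBKPT)
			/* printf is not async-signal-safe; kept simple for illustration */
			printf("hardware watchpoint trap, si_addr=%p\n", info->si_addr);
	}

	int main(void)
	{
		struct sigaction sa;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = trap_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGTRAP, &sa, NULL);

		pause();	/* wait for a debugger-armed watchpoint to fire */
		return 0;
	}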
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index fbff24827ae7..ee5a5d36bfa8 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -16,7 +16,7 @@
16#include <asm/page.h> 16#include <asm/page.h>
17#include <asm/types.h> 17#include <asm/types.h>
18#include <asm/ppc_asm.h> 18#include <asm/ppc_asm.h>
19#include <asm/offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/cputable.h> 20#include <asm/cputable.h>
21 21
22 .text 22 .text
@@ -129,12 +129,10 @@ _GLOBAL(__hash_page)
129 * code rather than call a C function...) 129 * code rather than call a C function...)
130 */ 130 */
131BEGIN_FTR_SECTION 131BEGIN_FTR_SECTION
132BEGIN_FTR_SECTION
133 mr r4,r30 132 mr r4,r30
134 mr r5,r7 133 mr r5,r7
135 bl .hash_page_do_lazy_icache 134 bl .hash_page_do_lazy_icache
136END_FTR_SECTION_IFSET(CPU_FTR_NOEXECUTE) 135END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
137END_FTR_SECTION_IFCLR(CPU_FTR_COHERENT_ICACHE)
138 136
139 /* At this point, r3 contains new PP bits, save them in 137 /* At this point, r3 contains new PP bits, save them in
140 * place of "access" in the param area (sic) 138 * place of "access" in the param area (sic)
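The hash_low.S change replaces a nested feature section, which the fixup mechanism cannot handle, with the two-argument END_FTR_SECTION(mask, value) form: the call is kept only when, of the two feature bits, NOEXECUTE is set and COHERENT_ICACHE is clear. In C terms the patched-in condition is roughly the following (cur_cpu_spec and the CPU_FTR_* bits are the real cputable symbols; the helper's signature is assumed):

	/* C-level sketch of the condition the feature fixup leaves behind. */
	static inline unsigned long maybe_lazy_icache(unsigned long pp,
						      unsigned long pte,
						      unsigned long trap)
	{
		unsigned long ftrs = cur_cpu_spec->cpu_features;

		if ((ftrs & (CPU_FTR_NOEXECUTE | CPU_FTR_COHERENT_ICACHE))
				== CPU_FTR_NOEXECUTE)
			pp = hash_page_do_lazy_icache(pp, pte, trap);	/* assumed signature */

		return pp;
	}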
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index a6abd3a979bf..7626bb59954d 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -51,7 +51,6 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
51 unsigned long prpn, unsigned long vflags, 51 unsigned long prpn, unsigned long vflags,
52 unsigned long rflags) 52 unsigned long rflags)
53{ 53{
54 unsigned long arpn = physRpn_to_absRpn(prpn);
55 hpte_t *hptep = htab_address + hpte_group; 54 hpte_t *hptep = htab_address + hpte_group;
56 unsigned long hpte_v, hpte_r; 55 unsigned long hpte_v, hpte_r;
57 int i; 56 int i;
@@ -74,7 +73,7 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
74 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID; 73 hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
75 if (vflags & HPTE_V_LARGE) 74 if (vflags & HPTE_V_LARGE)
76 va &= ~(1UL << HPTE_V_AVPN_SHIFT); 75 va &= ~(1UL << HPTE_V_AVPN_SHIFT);
77 hpte_r = (arpn << HPTE_R_RPN_SHIFT) | rflags; 76 hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
78 77
79 hptep->r = hpte_r; 78 hptep->r = hpte_r;
80 /* Guarantee the second dword is visible before the valid bit */ 79 /* Guarantee the second dword is visible before the valid bit */
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 623b5d130c31..09475c8edf7c 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -210,7 +210,7 @@ void __init htab_initialize(void)
210 210
211 /* create bolted the linear mapping in the hash table */ 211 /* create bolted the linear mapping in the hash table */
212 for (i=0; i < lmb.memory.cnt; i++) { 212 for (i=0; i < lmb.memory.cnt; i++) {
213 base = lmb.memory.region[i].physbase + KERNELBASE; 213 base = lmb.memory.region[i].base + KERNELBASE;
214 size = lmb.memory.region[i].size; 214 size = lmb.memory.region[i].size;
215 215
216 DBG("creating mapping for region: %lx : %lx\n", base, size); 216 DBG("creating mapping for region: %lx : %lx\n", base, size);
@@ -302,7 +302,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
302 int local = 0; 302 int local = 0;
303 cpumask_t tmp; 303 cpumask_t tmp;
304 304
305 if ((ea & ~REGION_MASK) > EADDR_MASK) 305 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
306 return 1; 306 return 1;
307 307
308 switch (REGION_ID(ea)) { 308 switch (REGION_ID(ea)) {
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index f9524602818d..338771ec70d7 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -27,124 +27,94 @@
27 27
28#include <linux/sysctl.h> 28#include <linux/sysctl.h>
29 29
30#define HUGEPGDIR_SHIFT (HPAGE_SHIFT + PAGE_SHIFT - 3) 30#define NUM_LOW_AREAS (0x100000000UL >> SID_SHIFT)
31#define HUGEPGDIR_SIZE (1UL << HUGEPGDIR_SHIFT) 31#define NUM_HIGH_AREAS (PGTABLE_RANGE >> HTLB_AREA_SHIFT)
32#define HUGEPGDIR_MASK (~(HUGEPGDIR_SIZE-1))
33 32
34#define HUGEPTE_INDEX_SIZE 9 33/* Modelled after find_linux_pte() */
35#define HUGEPGD_INDEX_SIZE 10 34pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
36
37#define PTRS_PER_HUGEPTE (1 << HUGEPTE_INDEX_SIZE)
38#define PTRS_PER_HUGEPGD (1 << HUGEPGD_INDEX_SIZE)
39
40static inline int hugepgd_index(unsigned long addr)
41{
42 return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
43}
44
45static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
46{ 35{
47 int index; 36 pgd_t *pg;
37 pud_t *pu;
38 pmd_t *pm;
39 pte_t *pt;
48 40
49 if (! mm->context.huge_pgdir) 41 BUG_ON(! in_hugepage_area(mm->context, addr));
50 return NULL;
51 42
43 addr &= HPAGE_MASK;
44
45 pg = pgd_offset(mm, addr);
46 if (!pgd_none(*pg)) {
47 pu = pud_offset(pg, addr);
48 if (!pud_none(*pu)) {
49 pm = pmd_offset(pu, addr);
50 pt = (pte_t *)pm;
51 BUG_ON(!pmd_none(*pm)
52 && !(pte_present(*pt) && pte_huge(*pt)));
53 return pt;
54 }
55 }
52 56
53 index = hugepgd_index(addr); 57 return NULL;
54 BUG_ON(index >= PTRS_PER_HUGEPGD);
55 return (pud_t *)(mm->context.huge_pgdir + index);
56} 58}
57 59
58static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) 60pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
59{ 61{
60 int index; 62 pgd_t *pg;
61 63 pud_t *pu;
62 if (pud_none(*dir)) 64 pmd_t *pm;
63 return NULL; 65 pte_t *pt;
64 66
65 index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
66 return (pte_t *)pud_page(*dir) + index;
67}
68
69static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
70{
71 BUG_ON(! in_hugepage_area(mm->context, addr)); 67 BUG_ON(! in_hugepage_area(mm->context, addr));
72 68
73 if (! mm->context.huge_pgdir) { 69 addr &= HPAGE_MASK;
74 pgd_t *new;
75 spin_unlock(&mm->page_table_lock);
76 /* Don't use pgd_alloc(), because we want __GFP_REPEAT */
77 new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT);
78 BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
79 spin_lock(&mm->page_table_lock);
80 70
81 /* 71 pg = pgd_offset(mm, addr);
82 * Because we dropped the lock, we should re-check the 72 pu = pud_alloc(mm, pg, addr);
83 * entry, as somebody else could have populated it..
84 */
85 if (mm->context.huge_pgdir)
86 pgd_free(new);
87 else
88 mm->context.huge_pgdir = new;
89 }
90 return hugepgd_offset(mm, addr);
91}
92 73
93static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) 74 if (pu) {
94{ 75 pm = pmd_alloc(mm, pu, addr);
95 if (! pud_present(*dir)) { 76 if (pm) {
96 pte_t *new; 77 pt = (pte_t *)pm;
97 78 BUG_ON(!pmd_none(*pm)
98 spin_unlock(&mm->page_table_lock); 79 && !(pte_present(*pt) && pte_huge(*pt)));
99 new = kmem_cache_alloc(zero_cache, GFP_KERNEL | __GFP_REPEAT); 80 return pt;
100 BUG_ON(memcmp(new, empty_zero_page, PAGE_SIZE));
101 spin_lock(&mm->page_table_lock);
102 /*
103 * Because we dropped the lock, we should re-check the
104 * entry, as somebody else could have populated it..
105 */
106 if (pud_present(*dir)) {
107 if (new)
108 kmem_cache_free(zero_cache, new);
109 } else {
110 struct page *ptepage;
111
112 if (! new)
113 return NULL;
114 ptepage = virt_to_page(new);
115 ptepage->mapping = (void *) mm;
116 ptepage->index = addr & HUGEPGDIR_MASK;
117 pud_populate(mm, dir, new);
118 } 81 }
119 } 82 }
120 83
121 return hugepte_offset(dir, addr); 84 return NULL;
122} 85}
123 86
124pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 87#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)
125{
126 pud_t *pud;
127 88
128 BUG_ON(! in_hugepage_area(mm->context, addr)); 89void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
90 pte_t *ptep, pte_t pte)
91{
92 int i;
129 93
130 pud = hugepgd_offset(mm, addr); 94 if (pte_present(*ptep)) {
131 if (! pud) 95 pte_clear(mm, addr, ptep);
132 return NULL; 96 flush_tlb_pending();
97 }
133 98
134 return hugepte_offset(pud, addr); 99 for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
100 *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
101 ptep++;
102 }
135} 103}
136 104
137pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) 105pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
106 pte_t *ptep)
138{ 107{
139 pud_t *pud; 108 unsigned long old = pte_update(ptep, ~0UL);
109 int i;
140 110
141 BUG_ON(! in_hugepage_area(mm->context, addr)); 111 if (old & _PAGE_HASHPTE)
112 hpte_update(mm, addr, old, 0);
142 113
143 pud = hugepgd_alloc(mm, addr); 114 for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
144 if (! pud) 115 ptep[i] = __pte(0);
145 return NULL;
146 116
147 return hugepte_alloc(mm, pud, addr); 117 return __pte(old);
148} 118}
149 119
150/* 120/*
@@ -162,29 +132,53 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
162 return 0; 132 return 0;
163} 133}
164 134
165static void flush_segments(void *parm) 135static void flush_low_segments(void *parm)
166{ 136{
167 u16 segs = (unsigned long) parm; 137 u16 areas = (unsigned long) parm;
168 unsigned long i; 138 unsigned long i;
169 139
170 asm volatile("isync" : : : "memory"); 140 asm volatile("isync" : : : "memory");
171 141
172 for (i = 0; i < 16; i++) { 142 BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
173 if (! (segs & (1U << i))) 143
144 for (i = 0; i < NUM_LOW_AREAS; i++) {
145 if (! (areas & (1U << i)))
174 continue; 146 continue;
175 asm volatile("slbie %0" : : "r" (i << SID_SHIFT)); 147 asm volatile("slbie %0"
148 : : "r" ((i << SID_SHIFT) | SLBIE_C));
176 } 149 }
177 150
178 asm volatile("isync" : : : "memory"); 151 asm volatile("isync" : : : "memory");
179} 152}
180 153
181static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg) 154static void flush_high_segments(void *parm)
182{ 155{
183 unsigned long start = seg << SID_SHIFT; 156 u16 areas = (unsigned long) parm;
184 unsigned long end = (seg+1) << SID_SHIFT; 157 unsigned long i, j;
158
159 asm volatile("isync" : : : "memory");
160
161 BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
162
163 for (i = 0; i < NUM_HIGH_AREAS; i++) {
164 if (! (areas & (1U << i)))
165 continue;
166 for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
167 asm volatile("slbie %0"
168 :: "r" (((i << HTLB_AREA_SHIFT)
169 + (j << SID_SHIFT)) | SLBIE_C));
170 }
171
172 asm volatile("isync" : : : "memory");
173}
174
175static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
176{
177 unsigned long start = area << SID_SHIFT;
178 unsigned long end = (area+1) << SID_SHIFT;
185 struct vm_area_struct *vma; 179 struct vm_area_struct *vma;
186 180
187 BUG_ON(seg >= 16); 181 BUG_ON(area >= NUM_LOW_AREAS);
188 182
189 /* Check no VMAs are in the region */ 183 /* Check no VMAs are in the region */
190 vma = find_vma(mm, start); 184 vma = find_vma(mm, start);
@@ -194,20 +188,39 @@ static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
194 return 0; 188 return 0;
195} 189}
196 190
197static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs) 191static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
192{
193 unsigned long start = area << HTLB_AREA_SHIFT;
194 unsigned long end = (area+1) << HTLB_AREA_SHIFT;
195 struct vm_area_struct *vma;
196
197 BUG_ON(area >= NUM_HIGH_AREAS);
198
199 /* Check no VMAs are in the region */
200 vma = find_vma(mm, start);
201 if (vma && (vma->vm_start < end))
202 return -EBUSY;
203
204 return 0;
205}
206
207static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
198{ 208{
199 unsigned long i; 209 unsigned long i;
200 210
201 newsegs &= ~(mm->context.htlb_segs); 211 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
202 if (! newsegs) 212 BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
213
214 newareas &= ~(mm->context.low_htlb_areas);
215 if (! newareas)
203 return 0; /* The segments we want are already open */ 216 return 0; /* The segments we want are already open */
204 217
205 for (i = 0; i < 16; i++) 218 for (i = 0; i < NUM_LOW_AREAS; i++)
206 if ((1 << i) & newsegs) 219 if ((1 << i) & newareas)
207 if (prepare_low_seg_for_htlb(mm, i) != 0) 220 if (prepare_low_area_for_htlb(mm, i) != 0)
208 return -EBUSY; 221 return -EBUSY;
209 222
210 mm->context.htlb_segs |= newsegs; 223 mm->context.low_htlb_areas |= newareas;
211 224
212 /* update the paca copy of the context struct */ 225 /* update the paca copy of the context struct */
213 get_paca()->context = mm->context; 226 get_paca()->context = mm->context;
@@ -215,29 +228,63 @@ static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
215 /* the context change must make it to memory before the flush, 228 /* the context change must make it to memory before the flush,
216 * so that further SLB misses do the right thing. */ 229 * so that further SLB misses do the right thing. */
217 mb(); 230 mb();
218 on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1); 231 on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
232
233 return 0;
234}
235
236static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
237{
238 unsigned long i;
239
240 BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
241 BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
242 != NUM_HIGH_AREAS);
243
244 newareas &= ~(mm->context.high_htlb_areas);
245 if (! newareas)
246 return 0; /* The areas we want are already open */
247
248 for (i = 0; i < NUM_HIGH_AREAS; i++)
249 if ((1 << i) & newareas)
250 if (prepare_high_area_for_htlb(mm, i) != 0)
251 return -EBUSY;
252
253 mm->context.high_htlb_areas |= newareas;
254
255 /* update the paca copy of the context struct */
256 get_paca()->context = mm->context;
257
258 /* the context change must make it to memory before the flush,
259 * so that further SLB misses do the right thing. */
260 mb();
261 on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
219 262
220 return 0; 263 return 0;
221} 264}
222 265
223int prepare_hugepage_range(unsigned long addr, unsigned long len) 266int prepare_hugepage_range(unsigned long addr, unsigned long len)
224{ 267{
225 if (within_hugepage_high_range(addr, len)) 268 int err;
226 return 0; 269
227 else if ((addr < 0x100000000UL) && ((addr+len) < 0x100000000UL)) { 270 if ( (addr+len) < addr )
228 int err; 271 return -EINVAL;
229 /* Yes, we need both tests, in case addr+len overflows 272
230 * 64-bit arithmetic */ 273 if ((addr + len) < 0x100000000UL)
231 err = open_low_hpage_segs(current->mm, 274 err = open_low_hpage_areas(current->mm,
232 LOW_ESID_MASK(addr, len)); 275 LOW_ESID_MASK(addr, len));
233 if (err) 276 else
234 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)" 277 err = open_high_hpage_areas(current->mm,
235 " failed (segs: 0x%04hx)\n", addr, len, 278 HTLB_AREA_MASK(addr, len));
236 LOW_ESID_MASK(addr, len)); 279 if (err) {
280 printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
281 " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
282 addr, len,
283 LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
237 return err; 284 return err;
238 } 285 }
239 286
240 return -EINVAL; 287 return 0;
241} 288}
242 289
243struct page * 290struct page *
@@ -309,8 +356,8 @@ full_search:
309 vma = find_vma(mm, addr); 356 vma = find_vma(mm, addr);
310 continue; 357 continue;
311 } 358 }
312 if (touches_hugepage_high_range(addr, len)) { 359 if (touches_hugepage_high_range(mm, addr, len)) {
313 addr = TASK_HPAGE_END; 360 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
314 vma = find_vma(mm, addr); 361 vma = find_vma(mm, addr);
315 continue; 362 continue;
316 } 363 }
@@ -389,8 +436,9 @@ hugepage_recheck:
389 if (touches_hugepage_low_range(mm, addr, len)) { 436 if (touches_hugepage_low_range(mm, addr, len)) {
390 addr = (addr & ((~0) << SID_SHIFT)) - len; 437 addr = (addr & ((~0) << SID_SHIFT)) - len;
391 goto hugepage_recheck; 438 goto hugepage_recheck;
392 } else if (touches_hugepage_high_range(addr, len)) { 439 } else if (touches_hugepage_high_range(mm, addr, len)) {
393 addr = TASK_HPAGE_BASE - len; 440 addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
441 goto hugepage_recheck;
394 } 442 }
395 443
396 /* 444 /*
@@ -481,23 +529,28 @@ static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
481 return -ENOMEM; 529 return -ENOMEM;
482} 530}
483 531
484static unsigned long htlb_get_high_area(unsigned long len) 532static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
485{ 533{
486 unsigned long addr = TASK_HPAGE_BASE; 534 unsigned long addr = 0x100000000UL;
487 struct vm_area_struct *vma; 535 struct vm_area_struct *vma;
488 536
489 vma = find_vma(current->mm, addr); 537 vma = find_vma(current->mm, addr);
490 for (vma = find_vma(current->mm, addr); 538 while (addr + len <= TASK_SIZE_USER64) {
491 addr + len <= TASK_HPAGE_END;
492 vma = vma->vm_next) {
493 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */ 539 BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
494 BUG_ON(! within_hugepage_high_range(addr, len)); 540
541 if (! __within_hugepage_high_range(addr, len, areamask)) {
542 addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
543 vma = find_vma(current->mm, addr);
544 continue;
545 }
495 546
496 if (!vma || (addr + len) <= vma->vm_start) 547 if (!vma || (addr + len) <= vma->vm_start)
497 return addr; 548 return addr;
498 addr = ALIGN(vma->vm_end, HPAGE_SIZE); 549 addr = ALIGN(vma->vm_end, HPAGE_SIZE);
499 /* Because we're in a hugepage region, this alignment 550 /* Depending on segmask this might not be a confirmed
500 * should not skip us over any VMAs */ 551 * hugepage region, so the ALIGN could have skipped
552 * some VMAs */
553 vma = find_vma(current->mm, addr);
501 } 554 }
502 555
503 return -ENOMEM; 556 return -ENOMEM;
@@ -507,6 +560,9 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
507 unsigned long len, unsigned long pgoff, 560 unsigned long len, unsigned long pgoff,
508 unsigned long flags) 561 unsigned long flags)
509{ 562{
563 int lastshift;
564 u16 areamask, curareas;
565
510 if (len & ~HPAGE_MASK) 566 if (len & ~HPAGE_MASK)
511 return -EINVAL; 567 return -EINVAL;
512 568
@@ -514,67 +570,49 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
514 return -EINVAL; 570 return -EINVAL;
515 571
516 if (test_thread_flag(TIF_32BIT)) { 572 if (test_thread_flag(TIF_32BIT)) {
517 int lastshift = 0; 573 curareas = current->mm->context.low_htlb_areas;
518 u16 segmask, cursegs = current->mm->context.htlb_segs;
519 574
520 /* First see if we can do the mapping in the existing 575 /* First see if we can do the mapping in the existing
521 * low hpage segments */ 576 * low areas */
522 addr = htlb_get_low_area(len, cursegs); 577 addr = htlb_get_low_area(len, curareas);
523 if (addr != -ENOMEM) 578 if (addr != -ENOMEM)
524 return addr; 579 return addr;
525 580
526 for (segmask = LOW_ESID_MASK(0x100000000UL-len, len); 581 lastshift = 0;
527 ! lastshift; segmask >>=1) { 582 for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
528 if (segmask & 1) 583 ! lastshift; areamask >>=1) {
584 if (areamask & 1)
529 lastshift = 1; 585 lastshift = 1;
530 586
531 addr = htlb_get_low_area(len, cursegs | segmask); 587 addr = htlb_get_low_area(len, curareas | areamask);
532 if ((addr != -ENOMEM) 588 if ((addr != -ENOMEM)
533 && open_low_hpage_segs(current->mm, segmask) == 0) 589 && open_low_hpage_areas(current->mm, areamask) == 0)
534 return addr; 590 return addr;
535 } 591 }
536 printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
537 " enough segments\n");
538 return -ENOMEM;
539 } else { 592 } else {
540 return htlb_get_high_area(len); 593 curareas = current->mm->context.high_htlb_areas;
541 }
542}
543
544void hugetlb_mm_free_pgd(struct mm_struct *mm)
545{
546 int i;
547 pgd_t *pgdir;
548
549 spin_lock(&mm->page_table_lock);
550
551 pgdir = mm->context.huge_pgdir;
552 if (! pgdir)
553 goto out;
554
555 mm->context.huge_pgdir = NULL;
556 594
557 /* cleanup any hugepte pages leftover */ 595 /* First see if we can do the mapping in the existing
558 for (i = 0; i < PTRS_PER_HUGEPGD; i++) { 596 * high areas */
559 pud_t *pud = (pud_t *)(pgdir + i); 597 addr = htlb_get_high_area(len, curareas);
560 598 if (addr != -ENOMEM)
561 if (! pud_none(*pud)) { 599 return addr;
562 pte_t *pte = (pte_t *)pud_page(*pud);
563 struct page *ptepage = virt_to_page(pte);
564 600
565 ptepage->mapping = NULL; 601 lastshift = 0;
602 for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
603 ! lastshift; areamask >>=1) {
604 if (areamask & 1)
605 lastshift = 1;
566 606
567 BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); 607 addr = htlb_get_high_area(len, curareas | areamask);
568 kmem_cache_free(zero_cache, pte); 608 if ((addr != -ENOMEM)
609 && open_high_hpage_areas(current->mm, areamask) == 0)
610 return addr;
569 } 611 }
570 pud_clear(pud);
571 } 612 }
572 613 printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
573 BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); 614 " enough areas\n");
574 kmem_cache_free(zero_cache, pgdir); 615 return -ENOMEM;
575
576 out:
577 spin_unlock(&mm->page_table_lock);
578} 616}
579 617
580int hash_huge_page(struct mm_struct *mm, unsigned long access, 618int hash_huge_page(struct mm_struct *mm, unsigned long access,
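The hugetlbpage.c rewrite drops the separate huge-page page directory: huge PTEs now live in the regular page tables at the pmd level, and the old per-process low-segment bitmap is generalised into low areas (256MB segments below 4GB, tracked in low_htlb_areas) plus high areas above 4GB (blocks of 1 << HTLB_AREA_SHIFT bytes, tracked in high_htlb_areas). A compressed sketch of how an address maps onto those bitmaps, roughly what an in_hugepage_area()-style test has to do; the helper itself is illustrative:

	/* Sketch: which "area" bit covers a given user address in the new scheme. */
	static inline int addr_in_open_hugepage_area(struct mm_struct *mm,
						     unsigned long addr)
	{
		if (addr < 0x100000000UL)	/* below 4GB: one bit per 256MB segment */
			return (mm->context.low_htlb_areas >> (addr >> SID_SHIFT)) & 1;

		/* above 4GB: one bit per 1 << HTLB_AREA_SHIFT bytes */
		return (mm->context.high_htlb_areas >> (addr >> HTLB_AREA_SHIFT)) & 1;
	}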
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index b6e75b891ac0..c65b87b92756 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -31,7 +31,7 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
31 break; 31 break;
32 if ((unsigned long)tmp->addr >= ioremap_bot) 32 if ((unsigned long)tmp->addr >= ioremap_bot)
33 addr = tmp->size + (unsigned long) tmp->addr; 33 addr = tmp->size + (unsigned long) tmp->addr;
34 if (addr > IMALLOC_END-size) 34 if (addr >= IMALLOC_END-size)
35 return 1; 35 return 1;
36 } 36 }
37 *im_addr = addr; 37 *im_addr = addr;
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index e58a24d42879..c2157c9c3acb 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -42,7 +42,6 @@
42 42
43#include <asm/pgalloc.h> 43#include <asm/pgalloc.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <asm/abs_addr.h>
46#include <asm/prom.h> 45#include <asm/prom.h>
47#include <asm/lmb.h> 46#include <asm/lmb.h>
48#include <asm/rtas.h> 47#include <asm/rtas.h>
@@ -66,6 +65,14 @@
66#include <asm/vdso.h> 65#include <asm/vdso.h>
67#include <asm/imalloc.h> 66#include <asm/imalloc.h>
68 67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
69int mem_init_done; 76int mem_init_done;
70unsigned long ioremap_bot = IMALLOC_BASE; 77unsigned long ioremap_bot = IMALLOC_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE; 78static unsigned long phbs_io_bot = PHBS_IO_BASE;
@@ -159,7 +166,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
159 ptep = pte_alloc_kernel(&init_mm, pmdp, ea); 166 ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
160 if (!ptep) 167 if (!ptep)
161 return -ENOMEM; 168 return -ENOMEM;
162 pa = abs_to_phys(pa);
163 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, 169 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
164 __pgprot(flags))); 170 __pgprot(flags)));
165 spin_unlock(&init_mm.page_table_lock); 171 spin_unlock(&init_mm.page_table_lock);
@@ -226,7 +232,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
226 * Before that, we map using addresses going 232 * Before that, we map using addresses going
227 * up from ioremap_bot. imalloc will use 233 * up from ioremap_bot. imalloc will use
228 * the addresses from ioremap_bot through 234 * the addresses from ioremap_bot through
229 * IMALLOC_END (0xE000001fffffffff) 235 * IMALLOC_END
230 * 236 *
231 */ 237 */
232 pa = addr & PAGE_MASK; 238 pa = addr & PAGE_MASK;
@@ -386,6 +392,7 @@ void free_initmem(void)
386 392
387 addr = (unsigned long)__init_begin; 393 addr = (unsigned long)__init_begin;
388 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) { 394 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
395 memset((void *)addr, 0xcc, PAGE_SIZE);
389 ClearPageReserved(virt_to_page(addr)); 396 ClearPageReserved(virt_to_page(addr));
390 set_page_count(virt_to_page(addr), 1); 397 set_page_count(virt_to_page(addr), 1);
391 free_page(addr); 398 free_page(addr);
@@ -417,12 +424,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
417 int index; 424 int index;
418 int err; 425 int err;
419 426
420#ifdef CONFIG_HUGETLB_PAGE
421 /* We leave htlb_segs as it was, but for a fork, we need to
422 * clear the huge_pgdir. */
423 mm->context.huge_pgdir = NULL;
424#endif
425
426again: 427again:
427 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL)) 428 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
428 return -ENOMEM; 429 return -ENOMEM;
@@ -453,8 +454,6 @@ void destroy_context(struct mm_struct *mm)
453 spin_unlock(&mmu_context_lock); 454 spin_unlock(&mmu_context_lock);
454 455
455 mm->context.id = NO_CONTEXT; 456 mm->context.id = NO_CONTEXT;
456
457 hugetlb_mm_free_pgd(mm);
458} 457}
459 458
460/* 459/*
@@ -484,9 +483,9 @@ void __init mm_init_ppc64(void)
484 for (i = 1; i < lmb.memory.cnt; i++) { 483 for (i = 1; i < lmb.memory.cnt; i++) {
485 unsigned long base, prevbase, prevsize; 484 unsigned long base, prevbase, prevsize;
486 485
487 prevbase = lmb.memory.region[i-1].physbase; 486 prevbase = lmb.memory.region[i-1].base;
488 prevsize = lmb.memory.region[i-1].size; 487 prevsize = lmb.memory.region[i-1].size;
489 base = lmb.memory.region[i].physbase; 488 base = lmb.memory.region[i].base;
490 if (base > (prevbase + prevsize)) { 489 if (base > (prevbase + prevsize)) {
491 io_hole_start = prevbase + prevsize; 490 io_hole_start = prevbase + prevsize;
492 io_hole_size = base - (prevbase + prevsize); 491 io_hole_size = base - (prevbase + prevsize);
@@ -513,11 +512,8 @@ int page_is_ram(unsigned long pfn)
513 for (i=0; i < lmb.memory.cnt; i++) { 512 for (i=0; i < lmb.memory.cnt; i++) {
514 unsigned long base; 513 unsigned long base;
515 514
516#ifdef CONFIG_MSCHUNKS
517 base = lmb.memory.region[i].physbase;
518#else
519 base = lmb.memory.region[i].base; 515 base = lmb.memory.region[i].base;
520#endif 516
521 if ((paddr >= base) && 517 if ((paddr >= base) &&
522 (paddr < (base + lmb.memory.region[i].size))) { 518 (paddr < (base + lmb.memory.region[i].size))) {
523 return 1; 519 return 1;
@@ -547,7 +543,7 @@ void __init do_init_bootmem(void)
547 */ 543 */
548 bootmap_pages = bootmem_bootmap_pages(total_pages); 544 bootmap_pages = bootmem_bootmap_pages(total_pages);
549 545
550 start = abs_to_phys(lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE)); 546 start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
551 BUG_ON(!start); 547 BUG_ON(!start);
552 548
553 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); 549 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
@@ -557,27 +553,18 @@ void __init do_init_bootmem(void)
557 /* Add all physical memory to the bootmem map, mark each area 553 /* Add all physical memory to the bootmem map, mark each area
558 * present. 554 * present.
559 */ 555 */
560 for (i=0; i < lmb.memory.cnt; i++) { 556 for (i=0; i < lmb.memory.cnt; i++)
561 unsigned long physbase, size; 557 free_bootmem(lmb.memory.region[i].base,
562 unsigned long start_pfn, end_pfn; 558 lmb_size_bytes(&lmb.memory, i));
563
564 physbase = lmb.memory.region[i].physbase;
565 size = lmb.memory.region[i].size;
566
567 start_pfn = physbase >> PAGE_SHIFT;
568 end_pfn = start_pfn + (size >> PAGE_SHIFT);
569 memory_present(0, start_pfn, end_pfn);
570
571 free_bootmem(physbase, size);
572 }
573 559
574 /* reserve the sections we're already using */ 560 /* reserve the sections we're already using */
575 for (i=0; i < lmb.reserved.cnt; i++) { 561 for (i=0; i < lmb.reserved.cnt; i++)
576 unsigned long physbase = lmb.reserved.region[i].physbase; 562 reserve_bootmem(lmb.reserved.region[i].base,
577 unsigned long size = lmb.reserved.region[i].size; 563 lmb_size_bytes(&lmb.reserved, i));
578 564
579 reserve_bootmem(physbase, size); 565 for (i=0; i < lmb.memory.cnt; i++)
580 } 566 memory_present(0, lmb_start_pfn(&lmb.memory, i),
567 lmb_end_pfn(&lmb.memory, i));
581} 568}
582 569
583/* 570/*
@@ -615,10 +602,10 @@ static int __init setup_kcore(void)
615 int i; 602 int i;
616 603
617 for (i=0; i < lmb.memory.cnt; i++) { 604 for (i=0; i < lmb.memory.cnt; i++) {
618 unsigned long physbase, size; 605 unsigned long base, size;
619 struct kcore_list *kcore_mem; 606 struct kcore_list *kcore_mem;
620 607
621 physbase = lmb.memory.region[i].physbase; 608 base = lmb.memory.region[i].base;
622 size = lmb.memory.region[i].size; 609 size = lmb.memory.region[i].size;
623 610
624 /* GFP_ATOMIC to avoid might_sleep warnings during boot */ 611 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
@@ -626,7 +613,7 @@ static int __init setup_kcore(void)
626 if (!kcore_mem) 613 if (!kcore_mem)
627 panic("mem_init: kmalloc failed\n"); 614 panic("mem_init: kmalloc failed\n");
628 615
629 kclist_add(kcore_mem, __va(physbase), size); 616 kclist_add(kcore_mem, __va(base), size);
630 } 617 }
631 618
632 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); 619 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
@@ -686,9 +673,6 @@ void __init mem_init(void)
686 673
687 mem_init_done = 1; 674 mem_init_done = 1;
688 675
689#ifdef CONFIG_PPC_ISERIES
690 iommu_vio_init();
691#endif
692 /* Initialize the vDSO */ 676 /* Initialize the vDSO */
693 vdso_init(); 677 vdso_init();
694} 678}
@@ -833,23 +817,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
833 return virt_addr; 817 return virt_addr;
834} 818}
835 819
836kmem_cache_t *zero_cache; 820static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
837
838static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
839{ 821{
840 memset(pte, 0, PAGE_SIZE); 822 memset(addr, 0, kmem_cache_size(cache));
841} 823}
842 824
825static const int pgtable_cache_size[2] = {
826 PTE_TABLE_SIZE, PMD_TABLE_SIZE
827};
828static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
829 "pgd_pte_cache", "pud_pmd_cache",
830};
831
832kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
833
843void pgtable_cache_init(void) 834void pgtable_cache_init(void)
844{ 835{
845 zero_cache = kmem_cache_create("zero", 836 int i;
846 PAGE_SIZE, 837
847 0, 838 BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
848 SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, 839 BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
849 zero_ctor, 840 BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
850 NULL); 841 BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
851 if (!zero_cache) 842
852 panic("pgtable_cache_init(): could not create zero_cache!\n"); 843 for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
844 int size = pgtable_cache_size[i];
845 const char *name = pgtable_cache_name[i];
846
847 pgtable_cache[i] = kmem_cache_create(name,
848 size, size,
849 SLAB_HWCACHE_ALIGN
850 | SLAB_MUST_HWCACHE_ALIGN,
851 zero_ctor,
852 NULL);
853 if (! pgtable_cache[i])
854 panic("pgtable_cache_init(): could not create %s!\n",
855 name);
856 }
853} 857}
854 858
855pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 859pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
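pgtable_cache_init() now builds an array of slab caches sized for the individual page-table levels instead of the single page-sized zero_cache, and zero_ctor keeps the guarantee that freshly allocated tables come back zeroed. Allocation sites index pgtable_cache[] by level; a sketch of a wrapper on top of it (the wrapper names are illustrative, the cache index is one asserted by the BUILD_BUG_ONs above):

	/* Sketch: zeroed PTE table from the size-specific cache. */
	static inline pte_t *pte_table_alloc(void)
	{
		return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
					GFP_KERNEL | __GFP_REPEAT);
	}

	static inline void pte_table_free(pte_t *pte)
	{
		kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
	}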
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index 0b191f2de016..cb864b8f2750 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,8 +440,6 @@ new_range:
440 for (i = start ; i < (start+size); i += MEMORY_INCREMENT) 440 for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
441 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 441 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
442 numa_domain; 442 numa_domain;
443 memory_present(numa_domain, start >> PAGE_SHIFT,
444 (start + size) >> PAGE_SHIFT);
445 443
446 if (--ranges) 444 if (--ranges)
447 goto new_range; 445 goto new_range;
@@ -483,7 +481,6 @@ static void __init setup_nonnuma(void)
483 481
484 for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT) 482 for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
485 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0; 483 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
486 memory_present(0, 0, init_node_data[0].node_end_pfn);
487} 484}
488 485
489static void __init dump_numa_topology(void) 486static void __init dump_numa_topology(void)
@@ -671,7 +668,7 @@ new_range:
 		 * Mark reserved regions on this node
 		 */
 		for (i = 0; i < lmb.reserved.cnt; i++) {
-			unsigned long physbase = lmb.reserved.region[i].physbase;
+			unsigned long physbase = lmb.reserved.region[i].base;
 			unsigned long size = lmb.reserved.region[i].size;

 			if (pa_to_nid(physbase) != nid &&
@@ -695,6 +692,46 @@ new_range:
 						     size);
 			}
 		}
+		/*
+		 * This loop may look familiar, but we have to do it again
+		 * after marking our reserved memory to mark memory present
+		 * for sparsemem.
+		 */
+		addr_cells = get_mem_addr_cells();
+		size_cells = get_mem_size_cells();
+		memory = NULL;
+		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+			unsigned long mem_start, mem_size;
+			int numa_domain, ranges;
+			unsigned int *memcell_buf;
+			unsigned int len;
+
+			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+			if (!memcell_buf || len <= 0)
+				continue;
+
+			ranges = memory->n_addrs;	/* ranges in cell */
+new_range2:
+			mem_start = read_n_cells(addr_cells, &memcell_buf);
+			mem_size = read_n_cells(size_cells, &memcell_buf);
+			if (numa_enabled) {
+				numa_domain = of_node_numa_domain(memory);
+				if (numa_domain >= MAX_NUMNODES)
+					numa_domain = 0;
+			} else
+				numa_domain = 0;
+
+			if (numa_domain != nid)
+				continue;
+
+			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
+			memory_present(numa_domain, mem_start >> PAGE_SHIFT,
+				       (mem_start + mem_size) >> PAGE_SHIFT);
+
+			if (--ranges)	/* process all ranges in cell */
+				goto new_range2;
+		}
+
 	}
 }

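
The added loop re-walks the "memory" device-tree nodes purely to call memory_present() for sparsemem after reserved regions are marked. It leans on read_n_cells(), which assembles a 64-bit value from n 32-bit property cells; a minimal userspace sketch of that helper, with a made-up "reg" property:

#include <stdint.h>
#include <stdio.h>

/* Assemble one value from n 32-bit cells, most-significant cell first,
 * which is what read_n_cells() does with a device-tree "reg" property. */
static uint64_t read_n_cells(int n, const uint32_t **bufp)
{
	uint64_t result = 0;

	while (n--) {
		result = (result << 32) | **bufp;
		(*bufp)++;
	}
	return result;
}

int main(void)
{
	/* made-up "reg" property: 2 address cells followed by 2 size cells */
	const uint32_t reg[] = { 0x0, 0x10000000, 0x0, 0x08000000 };
	const uint32_t *p = reg;
	uint64_t start = read_n_cells(2, &p);
	uint64_t size = read_n_cells(2, &p);

	printf("start=0x%llx size=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}
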
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 244150a0bc18..0473953f6a37 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -87,8 +87,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	int i;
 	asm volatile("isync" : : : "memory");
 	for (i = 0; i < offset; i++) {
-		esid_data = (unsigned long)get_paca()->slb_cache[i]
-			<< SID_SHIFT;
+		esid_data = ((unsigned long)get_paca()->slb_cache[i]
+			<< SID_SHIFT) | SLBIE_C;
 		asm volatile("slbie %0" : : "r" (esid_data));
 	}
 	asm volatile("isync" : : : "memory");
diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S
index 8379d678f70f..a3a03da503bc 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/ppc64/mm/slb_low.S
@@ -21,7 +21,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/ppc_asm.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
 #include <asm/cputable.h>

 /* void slb_allocate(unsigned long ea);
@@ -89,32 +89,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 	b	9f

 0:	/* user address: proto-VSID = context<<15 | ESID */
-	li	r11,SLB_VSID_USER
-
-	srdi.	r9,r3,13
+	srdi.	r9,r3,USER_ESID_BITS
 	bne-	8f			/* invalid ea bits set */

 #ifdef CONFIG_HUGETLB_PAGE
 BEGIN_FTR_SECTION
-	/* check against the hugepage ranges */
-	cmpldi	r3,(TASK_HPAGE_END>>SID_SHIFT)
-	bge	6f			/* >= TASK_HPAGE_END */
-	cmpldi	r3,(TASK_HPAGE_BASE>>SID_SHIFT)
-	bge	5f			/* TASK_HPAGE_BASE..TASK_HPAGE_END */
-	cmpldi	r3,16
-	bge	6f			/* 4GB..TASK_HPAGE_BASE */
-
-	lhz	r9,PACAHTLBSEGS(r13)
-	srd	r9,r9,r3
-	andi.	r9,r9,1
-	beq	6f
-
-5:	/* this is a hugepage user address */
-	li	r11,(SLB_VSID_USER|SLB_VSID_L)
+	lhz	r9,PACAHIGHHTLBAREAS(r13)
+	srdi	r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
+	srd	r9,r9,r11
+	lhz	r11,PACALOWHTLBAREAS(r13)
+	srd	r11,r11,r3
+	or	r9,r9,r11
+END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
+#endif /* CONFIG_HUGETLB_PAGE */
+
+	li	r11,SLB_VSID_USER
+
+#ifdef CONFIG_HUGETLB_PAGE
+BEGIN_FTR_SECTION
+	rldimi	r11,r9,8,55		/* shift masked bit into SLB_VSID_L */
 END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 #endif /* CONFIG_HUGETLB_PAGE */

-6:	ld	r9,PACACONTEXTID(r13)
+	ld	r9,PACACONTEXTID(r13)
 	rldimi	r3,r9,USER_ESID_BITS,0

 9:	/* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
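
The rewritten SLB-miss path no longer tests a fixed hugepage window; it loads what appear to be two 16-bit area masks from the paca and shifts them by the segment (low) or area (high) index, then folds the resulting bit into SLB_VSID_L. Roughly the same test in C, with illustrative shift constants rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28	/* 256MB segments */
#define HTLB_AREA_SHIFT	40	/* illustrative size of one "high" area */

/* Mirrors the lhz/srd/or sequence: pick one bit out of either mask. */
static int in_hugepage_area(uint64_t ea, uint16_t low_areas, uint16_t high_areas)
{
	uint64_t segment = ea >> SID_SHIFT;
	uint64_t area = ea >> HTLB_AREA_SHIFT;
	int low = segment < 16 && ((low_areas >> segment) & 1);
	int high = area < 16 && ((high_areas >> area) & 1);

	return low || high;
}

int main(void)
{
	uint16_t low_areas = 1 << 3;	/* segment 3 (0x30000000..) is huge */
	uint16_t high_areas = 0;

	printf("%d %d\n",
	       in_hugepage_area(0x30000000ULL, low_areas, high_areas),
	       in_hugepage_area(0x10000000ULL, low_areas, high_areas));
	return 0;
}
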
diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c
index 26f0172c4527..d8a6593a13f0 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/ppc64/mm/tlb.c
@@ -41,7 +41,58 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 unsigned long pte_freelist_forced_free;

-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)
+struct pte_freelist_batch
+{
+	struct rcu_head	rcu;
+	unsigned int	index;
+	pgtable_free_t	tables[0];
+};
+
+DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+unsigned long pte_freelist_forced_free;
+
+#define PTE_FREELIST_SIZE \
+	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+	  / sizeof(pgtable_free_t))
+
+#ifdef CONFIG_SMP
+static void pte_free_smp_sync(void *arg)
+{
+	/* Do nothing, just ensure we sync with all CPUs */
+}
+#endif
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+	pte_freelist_forced_free++;
+
+	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
+
+	pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+	struct pte_freelist_batch *batch =
+		container_of(head, struct pte_freelist_batch, rcu);
+	unsigned int i;
+
+	for (i = 0; i < batch->index; i++)
+		pgtable_free(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+	INIT_RCU_HEAD(&batch->rcu);
+	call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
 {
 	/* This is safe as we are holding page_table_lock */
 	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
@@ -49,19 +100,19 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage)

 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
-		pte_free(ptepage);
+		pgtable_free(pgf);
 		return;
 	}

 	if (*batchp == NULL) {
 		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
 		if (*batchp == NULL) {
-			pte_free_now(ptepage);
+			pgtable_free_now(pgf);
 			return;
 		}
 		(*batchp)->index = 0;
 	}
-	(*batchp)->pages[(*batchp)->index++] = ptepage;
+	(*batchp)->tables[(*batchp)->index++] = pgf;
 	if ((*batchp)->index == PTE_FREELIST_SIZE) {
 		pte_free_submit(*batchp);
 		*batchp = NULL;
@@ -132,42 +183,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	put_cpu();
 }

-#ifdef CONFIG_SMP
-static void pte_free_smp_sync(void *arg)
-{
-	/* Do nothing, just ensure we sync with all CPUs */
-}
-#endif
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-void pte_free_now(struct page *ptepage)
-{
-	pte_freelist_forced_free++;
-
-	smp_call_function(pte_free_smp_sync, NULL, 0, 1);
-
-	pte_free(ptepage);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
-	struct pte_freelist_batch *batch =
-		container_of(head, struct pte_freelist_batch, rcu);
-	unsigned int i;
-
-	for (i = 0; i < batch->index; i++)
-		pte_free(batch->pages[i]);
-	free_page((unsigned long)batch);
-}
-
-void pte_free_submit(struct pte_freelist_batch *batch)
-{
-	INIT_RCU_HEAD(&batch->rcu);
-	call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
 void pte_free_finish(void)
 {
 	/* This is safe as we are holding page_table_lock */
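
The tlb.c rework generalises the old page-only batching to arbitrary pgtable_free_t fragments: frees are queued into a page-sized batch that is handed to call_rcu(), and only when no batch page can be allocated is the fragment freed immediately after an IPI sync. A condensed userspace sketch of just the batching logic (pgf_t, BATCH_SLOTS and deferred_free() are made-up stand-ins; the kernel defers the free via RCU instead of freeing synchronously):

#include <stdlib.h>

typedef void *pgf_t;		/* stand-in for pgtable_free_t */

#define BATCH_SLOTS 16		/* the kernel sizes this so a batch fills one page */

struct batch {
	unsigned int index;
	pgf_t tables[BATCH_SLOTS];
};

static struct batch *cur;

/* In the kernel this runs as the call_rcu() callback; here we free at once. */
static void deferred_free(struct batch *b)
{
	for (unsigned int i = 0; i < b->index; i++)
		free(b->tables[i]);
	free(b);
}

static void queue_free(pgf_t pgf)
{
	if (!cur) {
		cur = calloc(1, sizeof(*cur));
		if (!cur) {		/* out of memory: free right away */
			free(pgf);
			return;
		}
	}
	cur->tables[cur->index++] = pgf;
	if (cur->index == BATCH_SLOTS) {
		deferred_free(cur);
		cur = NULL;
	}
}

int main(void)
{
	for (int i = 0; i < 40; i++)
		queue_free(malloc(64));
	if (cur) {			/* flush the partial batch, like pte_free_finish() */
		deferred_free(cur);
		cur = NULL;
	}
	return 0;
}
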
diff --git a/arch/ppc64/oprofile/common.c b/arch/ppc64/oprofile/common.c
index b28bfda23d94..e5f572710aa0 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/ppc64/oprofile/common.c
@@ -16,11 +16,9 @@
 #include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/pmc.h>
+#include <asm/cputable.h>
+#include <asm/oprofile_impl.h>

-#include "op_impl.h"
-
-extern struct op_ppc64_model op_model_rs64;
-extern struct op_ppc64_model op_model_power4;
 static struct op_ppc64_model *model;

 static struct op_counter_config ctr[OP_MAX_COUNTER];
@@ -123,52 +121,13 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)

 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	unsigned int pvr;
-
-	pvr = mfspr(SPRN_PVR);
-
-	switch (PVR_VER(pvr)) {
-		case PV_630:
-		case PV_630p:
-			model = &op_model_rs64;
-			model->num_counters = 8;
-			ops->cpu_type = "ppc64/power3";
-			break;
-
-		case PV_NORTHSTAR:
-		case PV_PULSAR:
-		case PV_ICESTAR:
-		case PV_SSTAR:
-			model = &op_model_rs64;
-			model->num_counters = 8;
-			ops->cpu_type = "ppc64/rs64";
-			break;
-
-		case PV_POWER4:
-		case PV_POWER4p:
-			model = &op_model_power4;
-			model->num_counters = 8;
-			ops->cpu_type = "ppc64/power4";
-			break;
-
-		case PV_970:
-		case PV_970FX:
-			model = &op_model_power4;
-			model->num_counters = 8;
-			ops->cpu_type = "ppc64/970";
-			break;
-
-		case PV_POWER5:
-		case PV_POWER5p:
-			model = &op_model_power4;
-			model->num_counters = 6;
-			ops->cpu_type = "ppc64/power5";
-			break;
-
-		default:
-			return -ENODEV;
-	}
+	if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
+		return -ENODEV;
+
+	model = cur_cpu_spec->oprofile_model;
+	model->num_counters = cur_cpu_spec->num_pmcs;

+	ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
 	ops->create_files = op_ppc64_create_files;
 	ops->setup = op_ppc64_setup;
 	ops->shutdown = op_ppc64_shutdown;
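
Together with the op_impl.h removal below, this makes oprofile model selection data-driven: the cputable entry for the running CPU supplies the model, counter count and cpu_type string instead of a PVR switch. A small sketch of the same lookup shape (the structs here are illustrative, not the kernel's cpu_spec layout):

#include <stdio.h>

struct op_model {
	const char *name;
	int num_counters;
};

/* Illustrative stand-in for the oprofile fields added to struct cpu_spec. */
struct cpu_spec {
	const char *cpu_name;
	struct op_model *oprofile_model;
	const char *oprofile_cpu_type;
	int num_pmcs;
};

static struct op_model op_model_power4 = { "power4", 0 };

static struct cpu_spec power5_spec = {
	.cpu_name = "POWER5",
	.oprofile_model = &op_model_power4,
	.oprofile_cpu_type = "ppc64/power5",
	.num_pmcs = 6,
};

static struct cpu_spec *cur_cpu_spec = &power5_spec;

static int oprofile_arch_init(void)
{
	if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
		return -1;

	cur_cpu_spec->oprofile_model->num_counters = cur_cpu_spec->num_pmcs;
	printf("cpu_type=%s counters=%d\n",
	       cur_cpu_spec->oprofile_cpu_type,
	       cur_cpu_spec->oprofile_model->num_counters);
	return 0;
}

int main(void)
{
	return oprofile_arch_init() ? 1 : 0;
}
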
diff --git a/arch/ppc64/oprofile/op_impl.h b/arch/ppc64/oprofile/op_impl.h
deleted file mode 100644
index 7fa7eaabc035..000000000000
--- a/arch/ppc64/oprofile/op_impl.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef OP_IMPL_H
-#define OP_IMPL_H 1
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
-	unsigned long valid;
-	unsigned long enabled;
-	unsigned long event;
-	unsigned long count;
-	unsigned long kernel;
-	/* We dont support per counter user/kernel selection */
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
-	unsigned long mmcr0;
-	unsigned long mmcr1;
-	unsigned long mmcra;
-	unsigned long enable_kernel;
-	unsigned long enable_user;
-	unsigned long backtrace_spinlocks;
-};
-
-/* Per-arch configuration */
-struct op_ppc64_model {
-	void (*reg_setup) (struct op_counter_config *,
-			   struct op_system_config *,
-			   int num_counters);
-	void (*cpu_setup) (void *);
-	void (*start) (struct op_counter_config *);
-	void (*stop) (void);
-	void (*handle_interrupt) (struct pt_regs *,
-				  struct op_counter_config *);
-	int num_counters;
-};
-
-static inline unsigned int ctr_read(unsigned int i)
-{
-	switch(i) {
-	case 0:
-		return mfspr(SPRN_PMC1);
-	case 1:
-		return mfspr(SPRN_PMC2);
-	case 2:
-		return mfspr(SPRN_PMC3);
-	case 3:
-		return mfspr(SPRN_PMC4);
-	case 4:
-		return mfspr(SPRN_PMC5);
-	case 5:
-		return mfspr(SPRN_PMC6);
-	case 6:
-		return mfspr(SPRN_PMC7);
-	case 7:
-		return mfspr(SPRN_PMC8);
-	default:
-		return 0;
-	}
-}
-
-static inline void ctr_write(unsigned int i, unsigned int val)
-{
-	switch(i) {
-	case 0:
-		mtspr(SPRN_PMC1, val);
-		break;
-	case 1:
-		mtspr(SPRN_PMC2, val);
-		break;
-	case 2:
-		mtspr(SPRN_PMC3, val);
-		break;
-	case 3:
-		mtspr(SPRN_PMC4, val);
-		break;
-	case 4:
-		mtspr(SPRN_PMC5, val);
-		break;
-	case 5:
-		mtspr(SPRN_PMC6, val);
-		break;
-	case 6:
-		mtspr(SPRN_PMC7, val);
-		break;
-	case 7:
-		mtspr(SPRN_PMC8, val);
-		break;
-	default:
-		break;
-	}
-}
-
-#endif
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/ppc64/oprofile/op_model_power4.c
index 3d103d66870d..32b2bb5625fe 100644
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ b/arch/ppc64/oprofile/op_model_power4.c
@@ -16,14 +16,12 @@
 #include <asm/cputable.h>
 #include <asm/systemcfg.h>
 #include <asm/rtas.h>
+#include <asm/oprofile_impl.h>

 #define dbg(args...)

-#include "op_impl.h"
-
 static unsigned long reset_value[OP_MAX_COUNTER];

-static int num_counters;
 static int oprofile_running;
 static int mmcra_has_sihv;

@@ -45,8 +43,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
 {
 	int i;

-	num_counters = num_ctrs;
-
 	/*
 	 * SIHV / SIPR bits are only implemented on POWER4+ (GQ) and above.
 	 * However we disable it on all POWER4 until we verify it works
@@ -68,7 +64,7 @@ static void power4_reg_setup(struct op_counter_config *ctr,

 	backtrace_spinlocks = sys->backtrace_spinlocks;

-	for (i = 0; i < num_counters; ++i)
+	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
 		reset_value[i] = 0x80000000UL - ctr[i].count;

 	/* setup user and kernel profiling */
@@ -121,7 +117,7 @@ static void power4_start(struct op_counter_config *ctr)
 	/* set the PMM bit (see comment below) */
 	mtmsrd(mfmsr() | MSR_PMM);

-	for (i = 0; i < num_counters; ++i) {
+	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		if (ctr[i].enabled) {
 			ctr_write(i, reset_value[i]);
 		} else {
@@ -272,7 +268,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 	/* set the PMM bit (see comment below) */
 	mtmsrd(mfmsr() | MSR_PMM);

-	for (i = 0; i < num_counters; ++i) {
+	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		val = ctr_read(i);
 		if (val < 0) {
 			if (oprofile_running && ctr[i].enabled) {
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/ppc64/oprofile/op_model_rs64.c
index bcec506c266a..08c5b333f5c4 100644
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ b/arch/ppc64/oprofile/op_model_rs64.c
@@ -14,11 +14,10 @@
 #include <asm/system.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
+#include <asm/oprofile_impl.h>

 #define dbg(args...)

-#include "op_impl.h"
-
 static void ctrl_write(unsigned int i, unsigned int val)
 {
 	unsigned int tmp = 0;
diff --git a/arch/ppc64/xmon/privinst.h b/arch/ppc64/xmon/privinst.h
index 183c3e400258..02eb40dac0b3 100644
--- a/arch/ppc64/xmon/privinst.h
+++ b/arch/ppc64/xmon/privinst.h
@@ -46,7 +46,6 @@ GSETSPR(287, pvr)
 GSETSPR(1008, hid0)
 GSETSPR(1009, hid1)
 GSETSPR(1010, iabr)
-GSETSPR(1013, dabr)
 GSETSPR(1023, pir)

 static inline void store_inst(void *p)
diff --git a/arch/ppc64/xmon/start.c b/arch/ppc64/xmon/start.c
index a9265bcc79b2..e50c158191e1 100644
--- a/arch/ppc64/xmon/start.c
+++ b/arch/ppc64/xmon/start.c
@@ -27,7 +27,7 @@ static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
 			       struct tty_struct *tty)
 {
 	/* ensure xmon is enabled */
-	xmon_init();
+	xmon_init(1);
 	debugger(pt_regs);
 }

@@ -61,7 +61,9 @@ xmon_read(void *handle, void *ptr, int nb)
 int
 xmon_read_poll(void)
 {
-	return udbg_getc_poll();
+	if (udbg_getc_poll)
+		return udbg_getc_poll();
+	return -1;
 }

 FILE *xmon_stdin;
diff --git a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c
index 05539439e6bc..74e63a886a69 100644
--- a/arch/ppc64/xmon/xmon.c
+++ b/arch/ppc64/xmon/xmon.c
@@ -586,6 +586,8 @@ int xmon_dabr_match(struct pt_regs *regs)
 {
 	if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
 		return 0;
+	if (dabr.enabled == 0)
+		return 0;
 	xmon_core(regs, 0);
 	return 1;
 }
@@ -628,20 +630,6 @@ int xmon_fault_handler(struct pt_regs *regs)
 		return 0;
 }

-/* On systems with a hypervisor, we can't set the DABR
-   (data address breakpoint register) directly. */
-static void set_controlled_dabr(unsigned long val)
-{
-#ifdef CONFIG_PPC_PSERIES
-	if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
-		int rc = plpar_hcall_norets(H_SET_DABR, val);
-		if (rc != H_Success)
-			xmon_printf("Warning: setting DABR failed (%d)\n", rc);
-	} else
-#endif
-		set_dabr(val);
-}
-
 static struct bpt *at_breakpoint(unsigned long pc)
 {
 	int i;
@@ -728,7 +716,7 @@ static void insert_bpts(void)
 static void insert_cpu_bpts(void)
 {
 	if (dabr.enabled)
-		set_controlled_dabr(dabr.address | (dabr.enabled & 7));
+		set_dabr(dabr.address | (dabr.enabled & 7));
 	if (iabr && cpu_has_feature(CPU_FTR_IABR))
 		set_iabr(iabr->address
 			 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
@@ -756,7 +744,7 @@ static void remove_bpts(void)

 static void remove_cpu_bpts(void)
 {
-	set_controlled_dabr(0);
+	set_dabr(0);
 	if (cpu_has_feature(CPU_FTR_IABR))
 		set_iabr(0);
 }
@@ -2496,15 +2484,25 @@ static void dump_stab(void)
 	}
 }

-void xmon_init(void)
+void xmon_init(int enable)
 {
-	__debugger = xmon;
-	__debugger_ipi = xmon_ipi;
-	__debugger_bpt = xmon_bpt;
-	__debugger_sstep = xmon_sstep;
-	__debugger_iabr_match = xmon_iabr_match;
-	__debugger_dabr_match = xmon_dabr_match;
-	__debugger_fault_handler = xmon_fault_handler;
+	if (enable) {
+		__debugger = xmon;
+		__debugger_ipi = xmon_ipi;
+		__debugger_bpt = xmon_bpt;
+		__debugger_sstep = xmon_sstep;
+		__debugger_iabr_match = xmon_iabr_match;
+		__debugger_dabr_match = xmon_dabr_match;
+		__debugger_fault_handler = xmon_fault_handler;
+	} else {
+		__debugger = NULL;
+		__debugger_ipi = NULL;
+		__debugger_bpt = NULL;
+		__debugger_sstep = NULL;
+		__debugger_iabr_match = NULL;
+		__debugger_dabr_match = NULL;
+		__debugger_fault_handler = NULL;
+	}
 }

 void dump_segments(void)
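
xmon_init() now takes an enable flag so one call can attach or detach the whole set of debugger hooks. A minimal sketch of that toggle pattern, with hypothetical hooks rather than the kernel's __debugger_* pointers:

#include <stdio.h>

static void (*hook_break)(void);
static void (*hook_fault)(void);

static void my_break(void) { puts("break"); }
static void my_fault(void) { puts("fault"); }

static void debugger_init(int enable)
{
	if (enable) {
		hook_break = my_break;
		hook_fault = my_fault;
	} else {
		hook_break = NULL;
		hook_fault = NULL;
	}
}

int main(void)
{
	debugger_init(1);
	if (hook_break)
		hook_break();	/* callers must tolerate NULL, as xmon_read_poll() now does */
	debugger_init(0);
	return 0;
}
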