aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAnton Altaparmakov <aia21@cantab.net>2005-10-31 05:06:46 -0500
committerAnton Altaparmakov <aia21@cantab.net>2005-10-31 05:06:46 -0500
commit1f04c0a24b2f3cfe89c802a24396263623e3512d (patch)
treed7e2216b6e65b833c0c2b79b478d13ce17dbf296 /arch
parent07b188ab773e183871e57b33ae37bf635c9f12ba (diff)
parente2f2e58e7968f8446b1078a20a18bf8ea12b4fbc (diff)
Merge branch 'master' of /usr/src/ntfs-2.6/
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/time.c4
-rw-r--r--arch/arm/common/amba.c2
-rw-r--r--arch/arm/common/dmabounce.c165
-rw-r--r--arch/arm/common/scoop.c3
-rw-r--r--arch/arm/configs/ixdp2400_defconfig2
-rw-r--r--arch/arm/configs/ixdp2800_defconfig2
-rw-r--r--arch/arm/kernel/arthur.c1
-rw-r--r--arch/arm/kernel/ptrace.c2
-rw-r--r--arch/arm/kernel/time.c4
-rw-r--r--arch/arm/kernel/traps.c29
-rw-r--r--arch/arm/lib/ashldi3.S48
-rw-r--r--arch/arm/lib/ashldi3.c56
-rw-r--r--arch/arm/lib/ashrdi3.S48
-rw-r--r--arch/arm/lib/ashrdi3.c57
-rw-r--r--arch/arm/lib/gcclib.h22
-rw-r--r--arch/arm/lib/lshrdi3.S48
-rw-r--r--arch/arm/lib/lshrdi3.c56
-rw-r--r--arch/arm/lib/muldi3.S44
-rw-r--r--arch/arm/lib/muldi3.c72
-rw-r--r--arch/arm/lib/ucmpdi2.S35
-rw-r--r--arch/arm/lib/ucmpdi2.c49
-rw-r--r--arch/arm/mach-imx/generic.c2
-rw-r--r--arch/arm/mach-integrator/clock.c1
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c1
-rw-r--r--arch/arm/mach-integrator/lm.c1
-rw-r--r--arch/arm/mach-iop3xx/iq31244-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80321-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80331-pci.c2
-rw-r--r--arch/arm/mach-iop3xx/iq80332-pci.c2
-rw-r--r--arch/arm/mach-pxa/corgi.c20
-rw-r--r--arch/arm/mach-pxa/generic.c1
-rw-r--r--arch/arm/mach-pxa/poodle.c21
-rw-r--r--arch/arm/mach-pxa/spitz.c19
-rw-r--r--arch/arm/mach-sa1100/generic.c1
-rw-r--r--arch/arm/mach-versatile/clock.c1
-rw-r--r--arch/arm/mm/copypage-v6.c16
-rw-r--r--arch/arm/plat-omap/clock.c1
-rw-r--r--arch/arm26/kernel/ptrace.c2
-rw-r--r--arch/arm26/kernel/time.c4
-rw-r--r--arch/cris/arch-v10/drivers/axisflashmap.c1
-rw-r--r--arch/cris/arch-v32/drivers/axisflashmap.c1
-rw-r--r--arch/cris/kernel/time.c5
-rw-r--r--arch/frv/kernel/ptrace.c2
-rw-r--r--arch/frv/kernel/time.c3
-rw-r--r--arch/h8300/kernel/ptrace.c2
-rw-r--r--arch/h8300/kernel/time.c4
-rw-r--r--arch/i386/Kconfig310
-rw-r--r--arch/i386/Kconfig.cpu309
-rw-r--r--arch/i386/Makefile31
-rw-r--r--arch/i386/Makefile.cpu41
-rw-r--r--arch/i386/kernel/apic.c82
-rw-r--r--arch/i386/kernel/apm.c40
-rw-r--r--arch/i386/kernel/cpu/common.c15
-rw-r--r--arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/p4-clockmod.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/powernow-k8.c1
-rw-r--r--arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c1
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c87
-rw-r--r--arch/i386/kernel/cpu/mcheck/p6.c11
-rw-r--r--arch/i386/kernel/cpu/mtrr/if.c119
-rw-r--r--arch/i386/kernel/cpu/proc.c2
-rw-r--r--arch/i386/kernel/crash.c7
-rw-r--r--arch/i386/kernel/i8259.c4
-rw-r--r--arch/i386/kernel/io_apic.c153
-rw-r--r--arch/i386/kernel/irq.c8
-rw-r--r--arch/i386/kernel/mpparse.c41
-rw-r--r--arch/i386/kernel/nmi.c39
-rw-r--r--arch/i386/kernel/ptrace.c2
-rw-r--r--arch/i386/kernel/reboot_fixups.c2
-rw-r--r--arch/i386/kernel/setup.c24
-rw-r--r--arch/i386/kernel/smpboot.c72
-rw-r--r--arch/i386/kernel/srat.c7
-rw-r--r--arch/i386/kernel/time.c16
-rw-r--r--arch/i386/kernel/time_hpet.c20
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c17
-rw-r--r--arch/i386/kernel/timers/timer_tsc.c21
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/mach-es7000/es7000.h11
-rw-r--r--arch/i386/mach-es7000/es7000plat.c11
-rw-r--r--arch/i386/mm/fault.c2
-rw-r--r--arch/i386/pci/irq.c55
-rw-r--r--arch/i386/power/cpu.c12
-rw-r--r--arch/ia64/ia32/sys_ia32.c1
-rw-r--r--arch/ia64/kernel/cyclone.c1
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/m32r/kernel/entry.S2
-rw-r--r--arch/m32r/kernel/io_m32700ut.c6
-rw-r--r--arch/m32r/kernel/io_mappi.c2
-rw-r--r--arch/m32r/kernel/io_mappi2.c11
-rw-r--r--arch/m32r/kernel/io_mappi3.c7
-rw-r--r--arch/m32r/kernel/io_oaks32r.c2
-rw-r--r--arch/m32r/kernel/io_opsput.c8
-rw-r--r--arch/m32r/kernel/io_usrv.c2
-rw-r--r--arch/m32r/kernel/ptrace.c2
-rw-r--r--arch/m32r/kernel/setup.c24
-rw-r--r--arch/m32r/kernel/time.c4
-rw-r--r--arch/m32r/lib/csum_partial_copy.c2
-rw-r--r--arch/m68k/kernel/ptrace.c2
-rw-r--r--arch/m68k/kernel/time.c4
-rw-r--r--arch/m68knommu/kernel/ptrace.c2
-rw-r--r--arch/m68knommu/kernel/time.c4
-rw-r--r--arch/mips/kernel/irixelf.c17
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/time.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c1
-rw-r--r--arch/parisc/kernel/ioctl32.c5
-rw-r--r--arch/parisc/kernel/ptrace.c2
-rw-r--r--arch/parisc/kernel/time.c4
-rw-r--r--arch/powerpc/Kconfig900
-rw-r--r--arch/powerpc/Kconfig.debug128
-rw-r--r--arch/powerpc/Makefile222
-rw-r--r--arch/powerpc/kernel/Makefile56
-rw-r--r--arch/powerpc/kernel/asm-offsets.c273
-rw-r--r--arch/powerpc/kernel/binfmt_elf32.c (renamed from arch/ppc64/kernel/binfmt_elf32.c)3
-rw-r--r--arch/powerpc/kernel/btext.c853
-rw-r--r--arch/powerpc/kernel/cputable.c (renamed from arch/ppc/kernel/cputable.c)797
-rw-r--r--arch/powerpc/kernel/entry_32.S1000
-rw-r--r--arch/powerpc/kernel/entry_64.S (renamed from arch/ppc64/kernel/entry.S)47
-rw-r--r--arch/powerpc/kernel/fpu.S (renamed from arch/ppc/kernel/fpu.S)105
-rw-r--r--arch/powerpc/kernel/head_32.S1381
-rw-r--r--arch/powerpc/kernel/head_44x.S782
-rw-r--r--arch/powerpc/kernel/head_4xx.S1022
-rw-r--r--arch/powerpc/kernel/head_64.S1957
-rw-r--r--arch/powerpc/kernel/head_8xx.S860
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1063
-rw-r--r--arch/powerpc/kernel/idle_6xx.S233
-rw-r--r--arch/powerpc/kernel/idle_power4.S (renamed from arch/ppc64/kernel/idle_power4.S)9
-rw-r--r--arch/powerpc/kernel/init_task.c (renamed from arch/ppc64/kernel/init_task.c)0
-rw-r--r--arch/powerpc/kernel/lparmap.c (renamed from arch/ppc64/kernel/lparmap.c)0
-rw-r--r--arch/powerpc/kernel/misc_32.S1037
-rw-r--r--arch/powerpc/kernel/misc_64.S880
-rw-r--r--arch/powerpc/kernel/of_device.c (renamed from arch/ppc64/kernel/of_device.c)6
-rw-r--r--arch/powerpc/kernel/pmc.c (renamed from arch/ppc64/kernel/pmc.c)30
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c273
-rw-r--r--arch/powerpc/kernel/process.c (renamed from arch/ppc64/kernel/process.c)486
-rw-r--r--arch/powerpc/kernel/prom.c2170
-rw-r--r--arch/powerpc/kernel/prom_init.c2109
-rw-r--r--arch/powerpc/kernel/ptrace.c (renamed from arch/ppc/kernel/ptrace.c)172
-rw-r--r--arch/powerpc/kernel/ptrace32.c (renamed from arch/ppc64/kernel/ptrace32.c)9
-rw-r--r--arch/powerpc/kernel/rtas.c (renamed from arch/ppc64/kernel/rtas.c)254
-rw-r--r--arch/powerpc/kernel/semaphore.c135
-rw-r--r--arch/powerpc/kernel/setup-common.c410
-rw-r--r--arch/powerpc/kernel/setup_32.c372
-rw-r--r--arch/powerpc/kernel/setup_64.c (renamed from arch/ppc64/kernel/setup.c)352
-rw-r--r--arch/powerpc/kernel/signal_32.c (renamed from arch/ppc64/kernel/signal32.c)993
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c (renamed from arch/ppc64/kernel/sys_ppc32.c)320
-rw-r--r--arch/powerpc/kernel/syscalls.c (renamed from arch/ppc64/kernel/syscalls.c)187
-rw-r--r--arch/powerpc/kernel/systbl.S321
-rw-r--r--arch/powerpc/kernel/time.c (renamed from arch/ppc64/kernel/time.c)574
-rw-r--r--arch/powerpc/kernel/traps.c1101
-rw-r--r--arch/powerpc/kernel/vecemu.c (renamed from arch/ppc/kernel/vecemu.c)0
-rw-r--r--arch/powerpc/kernel/vector.S (renamed from arch/ppc64/kernel/vector.S)71
-rw-r--r--arch/powerpc/kernel/vio.c (renamed from arch/ppc64/kernel/vio.c)14
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S279
-rw-r--r--arch/powerpc/lib/Makefile19
-rw-r--r--arch/powerpc/lib/checksum_32.S225
-rw-r--r--arch/powerpc/lib/checksum_64.S (renamed from arch/ppc64/lib/checksum.S)0
-rw-r--r--arch/powerpc/lib/copy_32.S543
-rw-r--r--arch/powerpc/lib/copypage_64.S (renamed from arch/ppc64/lib/copypage.S)0
-rw-r--r--arch/powerpc/lib/copyuser_64.S (renamed from arch/ppc64/lib/copyuser.S)0
-rw-r--r--arch/powerpc/lib/div64.S59
-rw-r--r--arch/powerpc/lib/e2a.c (renamed from arch/ppc64/lib/e2a.c)0
-rw-r--r--arch/powerpc/lib/locks.c (renamed from arch/ppc64/lib/locks.c)5
-rw-r--r--arch/powerpc/lib/mem_64.S119
-rw-r--r--arch/powerpc/lib/memcpy_64.S (renamed from arch/ppc64/lib/memcpy.S)0
-rw-r--r--arch/powerpc/lib/rheap.c693
-rw-r--r--arch/powerpc/lib/sstep.c (renamed from arch/ppc64/lib/sstep.c)17
-rw-r--r--arch/powerpc/lib/strcase.c (renamed from arch/ppc64/lib/strcase.c)8
-rw-r--r--arch/powerpc/lib/string.S198
-rw-r--r--arch/powerpc/lib/usercopy_64.c (renamed from arch/ppc64/lib/usercopy.c)0
-rw-r--r--arch/powerpc/mm/44x_mmu.c120
-rw-r--r--arch/powerpc/mm/4xx_mmu.c141
-rw-r--r--arch/powerpc/mm/Makefile21
-rw-r--r--arch/powerpc/mm/fault.c (renamed from arch/ppc64/mm/fault.c)104
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c237
-rw-r--r--arch/powerpc/mm/hash_low_32.S618
-rw-r--r--arch/powerpc/mm/hash_low_64.S (renamed from arch/ppc64/mm/hash_low.S)2
-rw-r--r--arch/powerpc/mm/hash_native_64.c (renamed from arch/ppc64/mm/hash_native.c)13
-rw-r--r--arch/powerpc/mm/hash_utils_64.c (renamed from arch/ppc64/mm/hash_utils.c)61
-rw-r--r--arch/powerpc/mm/hugetlbpage.c (renamed from arch/ppc64/mm/hugetlbpage.c)0
-rw-r--r--arch/powerpc/mm/imalloc.c (renamed from arch/ppc64/mm/imalloc.c)0
-rw-r--r--arch/powerpc/mm/init_32.c254
-rw-r--r--arch/powerpc/mm/init_64.c223
-rw-r--r--arch/powerpc/mm/lmb.c (renamed from arch/ppc64/kernel/lmb.c)105
-rw-r--r--arch/powerpc/mm/mem.c564
-rw-r--r--arch/powerpc/mm/mmap.c (renamed from arch/ppc64/mm/mmap.c)0
-rw-r--r--arch/powerpc/mm/mmu_context_32.c86
-rw-r--r--arch/powerpc/mm/mmu_context_64.c63
-rw-r--r--arch/powerpc/mm/mmu_decl.h87
-rw-r--r--arch/powerpc/mm/numa.c (renamed from arch/ppc64/mm/numa.c)2
-rw-r--r--arch/powerpc/mm/pgtable_32.c467
-rw-r--r--arch/powerpc/mm/pgtable_64.c347
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c285
-rw-r--r--arch/powerpc/mm/slb.c (renamed from arch/ppc64/mm/slb.c)0
-rw-r--r--arch/powerpc/mm/slb_low.S (renamed from arch/ppc64/mm/slb_low.S)0
-rw-r--r--arch/powerpc/mm/stab.c (renamed from arch/ppc64/mm/stab.c)0
-rw-r--r--arch/powerpc/mm/tlb_32.c183
-rw-r--r--arch/powerpc/mm/tlb_64.c (renamed from arch/ppc64/mm/tlb.c)23
-rw-r--r--arch/powerpc/oprofile/Kconfig (renamed from arch/ppc/oprofile/Kconfig)0
-rw-r--r--arch/powerpc/oprofile/Makefile (renamed from arch/ppc/oprofile/Makefile)7
-rw-r--r--arch/powerpc/oprofile/common.c (renamed from arch/ppc64/oprofile/common.c)84
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_booke.c (renamed from arch/ppc/oprofile/op_model_fsl_booke.c)7
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c (renamed from arch/ppc64/oprofile/op_model_power4.c)2
-rw-r--r--arch/powerpc/oprofile/op_model_rs64.c (renamed from arch/ppc64/oprofile/op_model_rs64.c)2
-rw-r--r--arch/powerpc/platforms/4xx/Kconfig280
-rw-r--r--arch/powerpc/platforms/4xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig86
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig352
-rw-r--r--arch/powerpc/platforms/Makefile13
-rw-r--r--arch/powerpc/platforms/apus/Kconfig130
-rw-r--r--arch/powerpc/platforms/chrp/Makefile4
-rw-r--r--arch/powerpc/platforms/chrp/chrp.h12
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c84
-rw-r--r--arch/powerpc/platforms/chrp/pci.c310
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c213
-rw-r--r--arch/powerpc/platforms/chrp/setup.c522
-rw-r--r--arch/powerpc/platforms/chrp/smp.c122
-rw-r--r--arch/powerpc/platforms/chrp/time.c188
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig318
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig31
-rw-r--r--arch/powerpc/platforms/iseries/Makefile9
-rw-r--r--arch/powerpc/platforms/iseries/call_hpt.h101
-rw-r--r--arch/powerpc/platforms/iseries/call_pci.h290
-rw-r--r--arch/powerpc/platforms/iseries/call_sm.h37
-rw-r--r--arch/powerpc/platforms/iseries/htab.c (renamed from arch/ppc64/kernel/iSeries_htab.c)47
-rw-r--r--arch/powerpc/platforms/iseries/hvcall.S (renamed from arch/ppc64/kernel/hvCall.S)22
-rw-r--r--arch/powerpc/platforms/iseries/hvlog.c (renamed from arch/ppc64/kernel/HvCall.c)1
-rw-r--r--arch/powerpc/platforms/iseries/hvlpconfig.c (renamed from arch/ppc64/kernel/HvLpConfig.c)1
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c (renamed from arch/ppc64/kernel/iSeries_iommu.c)44
-rw-r--r--arch/powerpc/platforms/iseries/ipl_parms.h70
-rw-r--r--arch/powerpc/platforms/iseries/irq.c (renamed from arch/ppc64/kernel/iSeries_irq.c)17
-rw-r--r--arch/powerpc/platforms/iseries/irq.h8
-rw-r--r--arch/powerpc/platforms/iseries/ksyms.c27
-rw-r--r--arch/powerpc/platforms/iseries/lpardata.c (renamed from arch/ppc64/kernel/LparData.c)28
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c (renamed from arch/ppc64/kernel/ItLpQueue.c)77
-rw-r--r--arch/powerpc/platforms/iseries/main_store.h165
-rw-r--r--arch/powerpc/platforms/iseries/mf.c (renamed from arch/ppc64/kernel/mf.c)98
-rw-r--r--arch/powerpc/platforms/iseries/misc.S55
-rw-r--r--arch/powerpc/platforms/iseries/pci.c (renamed from arch/ppc64/kernel/iSeries_pci.c)173
-rw-r--r--arch/powerpc/platforms/iseries/pci.h63
-rw-r--r--arch/powerpc/platforms/iseries/proc.c (renamed from arch/ppc64/kernel/iSeries_proc.c)15
-rw-r--r--arch/powerpc/platforms/iseries/processor_vpd.h85
-rw-r--r--arch/powerpc/platforms/iseries/release_data.h63
-rw-r--r--arch/powerpc/platforms/iseries/setup.c (renamed from arch/ppc64/kernel/iSeries_setup.c)495
-rw-r--r--arch/powerpc/platforms/iseries/setup.h (renamed from arch/ppc64/kernel/iSeries_setup.h)4
-rw-r--r--arch/powerpc/platforms/iseries/smp.c (renamed from arch/ppc64/kernel/iSeries_smp.c)46
-rw-r--r--arch/powerpc/platforms/iseries/spcomm_area.h36
-rw-r--r--arch/powerpc/platforms/iseries/vio.c (renamed from arch/ppc64/kernel/iSeries_vio.c)1
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c (renamed from arch/ppc64/kernel/viopath.c)3
-rw-r--r--arch/powerpc/platforms/iseries/vpd_areas.h88
-rw-r--r--arch/powerpc/platforms/iseries/vpdinfo.c (renamed from arch/ppc64/kernel/iSeries_VpdInfo.c)21
-rw-r--r--arch/powerpc/platforms/maple/Makefile1
-rw-r--r--arch/powerpc/platforms/maple/maple.h12
-rw-r--r--arch/powerpc/platforms/maple/pci.c (renamed from arch/ppc64/kernel/maple_pci.c)7
-rw-r--r--arch/powerpc/platforms/maple/setup.c (renamed from arch/ppc64/kernel/maple_setup.c)13
-rw-r--r--arch/powerpc/platforms/maple/time.c (renamed from arch/ppc64/kernel/maple_time.c)9
-rw-r--r--arch/powerpc/platforms/powermac/Makefile8
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c202
-rw-r--r--arch/powerpc/platforms/powermac/cache.S359
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq.c726
-rw-r--r--arch/powerpc/platforms/powermac/feature.c3063
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c (renamed from arch/ppc64/kernel/pmac_low_i2c.c)0
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c (renamed from arch/ppc64/kernel/pmac_nvram.c)282
-rw-r--r--arch/powerpc/platforms/powermac/pci.c1170
-rw-r--r--arch/powerpc/platforms/powermac/pic.c678
-rw-r--r--arch/powerpc/platforms/powermac/pic.h11
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h51
-rw-r--r--arch/powerpc/platforms/powermac/setup.c794
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S396
-rw-r--r--arch/powerpc/platforms/powermac/smp.c865
-rw-r--r--arch/powerpc/platforms/powermac/time.c360
-rw-r--r--arch/powerpc/platforms/prep/Kconfig22
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig42
-rw-r--r--arch/powerpc/platforms/pseries/Makefile5
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S (renamed from arch/ppc64/kernel/pSeries_hvCall.S)0
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c (renamed from arch/ppc64/kernel/pSeries_iommu.c)28
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c (renamed from arch/ppc64/kernel/pSeries_lpar.c)5
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c (renamed from arch/ppc64/kernel/pSeries_nvram.c)0
-rw-r--r--arch/powerpc/platforms/pseries/pci.c (renamed from arch/ppc64/kernel/pSeries_pci.c)3
-rw-r--r--arch/powerpc/platforms/pseries/ras.c (renamed from arch/ppc64/kernel/ras.c)11
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c (renamed from arch/ppc64/kernel/pSeries_reconfig.c)0
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fw.c138
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fw.h3
-rw-r--r--arch/powerpc/platforms/pseries/setup.c (renamed from arch/ppc64/kernel/pSeries_setup.c)57
-rw-r--r--arch/powerpc/platforms/pseries/smp.c (renamed from arch/ppc64/kernel/pSeries_smp.c)52
-rw-r--r--arch/powerpc/platforms/pseries/vio.c (renamed from arch/ppc64/kernel/pSeries_vio.c)1
-rw-r--r--arch/powerpc/platforms/pseries/xics.c (renamed from arch/ppc64/kernel/xics.c)30
-rw-r--r--arch/powerpc/platforms/pseries/xics.h34
-rw-r--r--arch/powerpc/sysdev/Makefile7
-rw-r--r--arch/powerpc/sysdev/dcr.S (renamed from arch/ppc/syslib/dcr.S)0
-rw-r--r--arch/powerpc/sysdev/grackle.c64
-rw-r--r--arch/powerpc/sysdev/i8259.c (renamed from arch/ppc/syslib/i8259.c)65
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c (renamed from arch/ppc/syslib/indirect_pci.c)0
-rw-r--r--arch/powerpc/sysdev/mpic.c (renamed from arch/ppc64/kernel/mpic.c)53
-rw-r--r--arch/powerpc/sysdev/u3_iommu.c (renamed from arch/ppc64/kernel/u3_iommu.c)50
-rw-r--r--arch/powerpc/xmon/Makefile11
-rw-r--r--arch/powerpc/xmon/ansidecl.h (renamed from arch/ppc64/xmon/ansidecl.h)0
-rw-r--r--arch/powerpc/xmon/nonstdio.h (renamed from arch/ppc64/xmon/nonstdio.h)0
-rw-r--r--arch/powerpc/xmon/ppc-dis.c (renamed from arch/ppc64/xmon/ppc-dis.c)0
-rw-r--r--arch/powerpc/xmon/ppc-opc.c (renamed from arch/ppc64/xmon/ppc-opc.c)0
-rw-r--r--arch/powerpc/xmon/ppc.h (renamed from arch/ppc64/xmon/ppc.h)0
-rw-r--r--arch/powerpc/xmon/setjmp.S135
-rw-r--r--arch/powerpc/xmon/start_32.c624
-rw-r--r--arch/powerpc/xmon/start_64.c (renamed from arch/ppc64/xmon/start.c)0
-rw-r--r--arch/powerpc/xmon/start_8xx.c287
-rw-r--r--arch/powerpc/xmon/subr_prf.c (renamed from arch/ppc64/xmon/subr_prf.c)11
-rw-r--r--arch/powerpc/xmon/xmon.c (renamed from arch/ppc64/xmon/xmon.c)399
-rw-r--r--arch/ppc/8xx_io/commproc.c20
-rw-r--r--arch/ppc/Kconfig40
-rw-r--r--arch/ppc/Makefile14
-rw-r--r--arch/ppc/boot/of1275/claim.c1
-rw-r--r--arch/ppc/boot/openfirmware/chrpmain.c2
-rw-r--r--arch/ppc/boot/openfirmware/coffmain.c2
-rw-r--r--arch/ppc/kernel/Makefile27
-rw-r--r--arch/ppc/kernel/align.c4
-rw-r--r--arch/ppc/kernel/asm-offsets.c3
-rw-r--r--arch/ppc/kernel/cpu_setup_6xx.S6
-rw-r--r--arch/ppc/kernel/cpu_setup_power4.S6
-rw-r--r--arch/ppc/kernel/entry.S12
-rw-r--r--arch/ppc/kernel/head.S100
-rw-r--r--arch/ppc/kernel/head_44x.S32
-rw-r--r--arch/ppc/kernel/head_4xx.S68
-rw-r--r--arch/ppc/kernel/head_8xx.S42
-rw-r--r--arch/ppc/kernel/head_booke.h4
-rw-r--r--arch/ppc/kernel/head_fsl_booke.S47
-rw-r--r--arch/ppc/kernel/idle.c3
-rw-r--r--arch/ppc/kernel/irq.c1
-rw-r--r--arch/ppc/kernel/l2cr.S2
-rw-r--r--arch/ppc/kernel/misc.S235
-rw-r--r--arch/ppc/kernel/pci.c33
-rw-r--r--arch/ppc/kernel/perfmon.c96
-rw-r--r--arch/ppc/kernel/perfmon_fsl_booke.c2
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c34
-rw-r--r--arch/ppc/kernel/process.c142
-rw-r--r--arch/ppc/kernel/setup.c39
-rw-r--r--arch/ppc/kernel/signal.c771
-rw-r--r--arch/ppc/kernel/smp.c22
-rw-r--r--arch/ppc/kernel/syscalls.c268
-rw-r--r--arch/ppc/kernel/time.c14
-rw-r--r--arch/ppc/kernel/traps.c42
-rw-r--r--arch/ppc/kernel/vector.S217
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S26
-rw-r--r--arch/ppc/lib/string.S24
-rw-r--r--arch/ppc/math-emu/sfp-machine.h2
-rw-r--r--arch/ppc/mm/init.c23
-rw-r--r--arch/ppc/oprofile/common.c161
-rw-r--r--arch/ppc/oprofile/op_impl.h45
-rw-r--r--arch/ppc/platforms/4xx/bamboo.c14
-rw-r--r--arch/ppc/platforms/4xx/ebony.c15
-rw-r--r--arch/ppc/platforms/4xx/luan.c13
-rw-r--r--arch/ppc/platforms/4xx/ocotea.c31
-rw-r--r--arch/ppc/platforms/83xx/mpc834x_sys.h1
-rw-r--r--arch/ppc/platforms/85xx/mpc8540_ads.c30
-rw-r--r--arch/ppc/platforms/85xx/mpc8560_ads.c25
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_ads_common.h1
-rw-r--r--arch/ppc/platforms/85xx/mpc85xx_cds_common.c39
-rw-r--r--arch/ppc/platforms/85xx/sbc8560.c22
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.c21
-rw-r--r--arch/ppc/platforms/85xx/stx_gp3.h1
-rw-r--r--arch/ppc/platforms/Makefile3
-rw-r--r--arch/ppc/platforms/chestnut.c1
-rw-r--r--arch/ppc/platforms/chrp_nvram.c83
-rw-r--r--arch/ppc/platforms/chrp_pci.c10
-rw-r--r--arch/ppc/platforms/chrp_pegasos_eth.c124
-rw-r--r--arch/ppc/platforms/chrp_setup.c33
-rw-r--r--arch/ppc/platforms/chrp_smp.c3
-rw-r--r--arch/ppc/platforms/chrp_time.c8
-rw-r--r--arch/ppc/platforms/ev64360.c1
-rw-r--r--arch/ppc/platforms/fads.h2
-rw-r--r--arch/ppc/platforms/gemini_setup.c4
-rw-r--r--arch/ppc/platforms/hdpu.c9
-rw-r--r--arch/ppc/platforms/katana.c3
-rw-r--r--arch/ppc/platforms/lite5200.c1
-rw-r--r--arch/ppc/platforms/lopec.c17
-rw-r--r--arch/ppc/platforms/mpc885ads.h2
-rw-r--r--arch/ppc/platforms/mvme5100.c6
-rw-r--r--arch/ppc/platforms/pal4_setup.c1
-rw-r--r--arch/ppc/platforms/pmac_backlight.c16
-rw-r--r--arch/ppc/platforms/pmac_cpufreq.c36
-rw-r--r--arch/ppc/platforms/pmac_feature.c176
-rw-r--r--arch/ppc/platforms/pmac_nvram.c42
-rw-r--r--arch/ppc/platforms/pmac_pci.c28
-rw-r--r--arch/ppc/platforms/pmac_pic.c27
-rw-r--r--arch/ppc/platforms/pmac_setup.c19
-rw-r--r--arch/ppc/platforms/pmac_sleep.S4
-rw-r--r--arch/ppc/platforms/pmac_smp.c11
-rw-r--r--arch/ppc/platforms/pmac_time.c8
-rw-r--r--arch/ppc/platforms/pplus.c17
-rw-r--r--arch/ppc/platforms/prep_pci.c64
-rw-r--r--arch/ppc/platforms/prep_setup.c70
-rw-r--r--arch/ppc/platforms/radstone_ppc7d.c15
-rw-r--r--arch/ppc/platforms/residual.c2
-rw-r--r--arch/ppc/platforms/sandpoint.c21
-rw-r--r--arch/ppc/syslib/Makefile57
-rw-r--r--arch/ppc/syslib/btext.c6
-rw-r--r--arch/ppc/syslib/gt64260_pic.c1
-rw-r--r--arch/ppc/syslib/ibm440gx_common.c6
-rw-r--r--arch/ppc/syslib/ibm44x_common.c37
-rw-r--r--arch/ppc/syslib/ibm44x_common.h3
-rw-r--r--arch/ppc/syslib/m8260_setup.c4
-rw-r--r--arch/ppc/syslib/m82xx_pci.c4
-rw-r--r--arch/ppc/syslib/m8xx_setup.c48
-rw-r--r--arch/ppc/syslib/m8xx_wdt.c14
-rw-r--r--arch/ppc/syslib/mpc52xx_pci.c3
-rw-r--r--arch/ppc/syslib/mpc83xx_devices.c1
-rw-r--r--arch/ppc/syslib/mpc85xx_devices.c17
-rw-r--r--arch/ppc/syslib/mpc85xx_sys.c44
-rw-r--r--arch/ppc/syslib/mpc8xx_sys.c4
-rw-r--r--arch/ppc/syslib/mv64360_pic.c1
-rw-r--r--arch/ppc/syslib/mv64x60.c2
-rw-r--r--arch/ppc/syslib/mv64x60_dbg.c1
-rw-r--r--arch/ppc/syslib/of_device.c276
-rw-r--r--arch/ppc/syslib/open_pic.c3
-rw-r--r--arch/ppc/syslib/open_pic2.c1
-rw-r--r--arch/ppc/syslib/ppc403_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_pic.c1
-rw-r--r--arch/ppc/syslib/ppc4xx_setup.c2
-rw-r--r--arch/ppc/syslib/ppc83xx_setup.c1
-rw-r--r--arch/ppc/syslib/ppc85xx_setup.c1
-rw-r--r--arch/ppc/syslib/ppc8xx_pic.c17
-rw-r--r--arch/ppc/syslib/ppc_sys.c3
-rw-r--r--arch/ppc/syslib/pq2_devices.c1
-rw-r--r--arch/ppc/syslib/prep_nvram.c13
-rw-r--r--arch/ppc/syslib/prom.c18
-rw-r--r--arch/ppc/syslib/xilinx_pic.c1
-rw-r--r--arch/ppc/xmon/start.c3
-rw-r--r--arch/ppc/xmon/xmon.c9
-rw-r--r--arch/ppc64/Kconfig33
-rw-r--r--arch/ppc64/Makefile18
-rw-r--r--arch/ppc64/boot/Makefile67
-rw-r--r--arch/ppc64/boot/crt0.S53
-rw-r--r--arch/ppc64/boot/install.sh2
-rw-r--r--arch/ppc64/boot/main.c268
-rw-r--r--arch/ppc64/boot/string.S4
-rw-r--r--arch/ppc64/boot/string.h1
-rw-r--r--arch/ppc64/boot/zImage.lds64
-rw-r--r--arch/ppc64/boot/zlib.c2195
-rw-r--r--arch/ppc64/boot/zlib.h432
-rw-r--r--arch/ppc64/defconfig4
-rw-r--r--arch/ppc64/kernel/HvLpEvent.c88
-rw-r--r--arch/ppc64/kernel/Makefile75
-rw-r--r--arch/ppc64/kernel/align.c4
-rw-r--r--arch/ppc64/kernel/asm-offsets.c3
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c2
-rw-r--r--arch/ppc64/kernel/bpa_setup.c7
-rw-r--r--arch/ppc64/kernel/btext.c42
-rw-r--r--arch/ppc64/kernel/cputable.c308
-rw-r--r--arch/ppc64/kernel/eeh.c2
-rw-r--r--arch/ppc64/kernel/head.S290
-rw-r--r--arch/ppc64/kernel/hvcserver.c2
-rw-r--r--arch/ppc64/kernel/i8259.c177
-rw-r--r--arch/ppc64/kernel/i8259.h17
-rw-r--r--arch/ppc64/kernel/idle.c8
-rw-r--r--arch/ppc64/kernel/ioctl32.c4
-rw-r--r--arch/ppc64/kernel/kprobes.c1
-rw-r--r--arch/ppc64/kernel/misc.S662
-rw-r--r--arch/ppc64/kernel/mpic.h273
-rw-r--r--arch/ppc64/kernel/pci.c46
-rw-r--r--arch/ppc64/kernel/pci.h54
-rw-r--r--arch/ppc64/kernel/pci_direct_iommu.c3
-rw-r--r--arch/ppc64/kernel/pci_dn.c3
-rw-r--r--arch/ppc64/kernel/pci_iommu.c21
-rw-r--r--arch/ppc64/kernel/pmac.h31
-rw-r--r--arch/ppc64/kernel/pmac_feature.c767
-rw-r--r--arch/ppc64/kernel/pmac_pci.c793
-rw-r--r--arch/ppc64/kernel/pmac_setup.c525
-rw-r--r--arch/ppc64/kernel/pmac_smp.c330
-rw-r--r--arch/ppc64/kernel/pmac_time.c195
-rw-r--r--arch/ppc64/kernel/ppc_ksyms.c20
-rw-r--r--arch/ppc64/kernel/prom.c7
-rw-r--r--arch/ppc64/kernel/prom_init.c1
-rw-r--r--arch/ppc64/kernel/ptrace.c363
-rw-r--r--arch/ppc64/kernel/rtas-proc.c1
-rw-r--r--arch/ppc64/kernel/rtas_pci.c9
-rw-r--r--arch/ppc64/kernel/rtc.c48
-rw-r--r--arch/ppc64/kernel/signal.c2
-rw-r--r--arch/ppc64/kernel/smp.c40
-rw-r--r--arch/ppc64/kernel/traps.c568
-rw-r--r--arch/ppc64/kernel/vdso64/sigtramp.S1
-rw-r--r--arch/ppc64/kernel/vecemu.c346
-rw-r--r--arch/ppc64/kernel/vmlinux.lds.S17
-rw-r--r--arch/ppc64/lib/Makefile15
-rw-r--r--arch/ppc64/lib/string.S106
-rw-r--r--arch/ppc64/mm/Makefile11
-rw-r--r--arch/ppc64/mm/init.c950
-rw-r--r--arch/ppc64/oprofile/Kconfig23
-rw-r--r--arch/ppc64/oprofile/Makefile9
-rw-r--r--arch/ppc64/xmon/Makefile5
-rw-r--r--arch/ppc64/xmon/setjmp.S73
-rw-r--r--arch/s390/kernel/compat_ioctl.c9
-rw-r--r--arch/s390/kernel/head.S72
-rw-r--r--arch/s390/kernel/head64.S66
-rw-r--r--arch/s390/kernel/setup.c186
-rw-r--r--arch/s390/kernel/time.c4
-rw-r--r--arch/s390/kernel/vtime.c18
-rw-r--r--arch/sh/drivers/dma/dma-sysfs.c1
-rw-r--r--arch/sh/kernel/cpufreq.c1
-rw-r--r--arch/sh/kernel/ptrace.c2
-rw-r--r--arch/sh/kernel/time.c4
-rw-r--r--arch/sh64/kernel/ptrace.c2
-rw-r--r--arch/sh64/kernel/time.c2
-rw-r--r--arch/sparc/kernel/pcic.c4
-rw-r--r--arch/sparc/kernel/time.c4
-rw-r--r--arch/sparc64/kernel/ioctl32.c3
-rw-r--r--arch/sparc64/kernel/time.c4
-rw-r--r--arch/um/Kconfig10
-rw-r--r--arch/um/Kconfig.x86_645
-rw-r--r--arch/um/Makefile-i38612
-rw-r--r--arch/um/include/sysdep-i386/syscalls.h1
-rw-r--r--arch/um/kernel/time_kern.c4
-rw-r--r--arch/v850/kernel/ptrace.c2
-rw-r--r--arch/v850/kernel/time.c4
-rw-r--r--arch/x86_64/ia32/ia32_ioctl.c125
-rw-r--r--arch/x86_64/kernel/i8259.c8
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/suspend.c95
-rw-r--r--arch/x86_64/kernel/time.c32
-rw-r--r--arch/xtensa/kernel/platform.c1
-rw-r--r--arch/xtensa/kernel/ptrace.c2
-rw-r--r--arch/xtensa/kernel/time.c3
521 files changed, 46650 insertions, 17651 deletions
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 67be50b7d80a..6b2921be1909 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -55,10 +55,6 @@
55#include "proto.h" 55#include "proto.h"
56#include "irq_impl.h" 56#include "irq_impl.h"
57 57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62extern unsigned long wall_jiffies; /* kernel/timer.c */ 58extern unsigned long wall_jiffies; /* kernel/timer.c */
63 59
64static int set_rtc_mmss(unsigned long); 60static int set_rtc_mmss(unsigned long);
diff --git a/arch/arm/common/amba.c b/arch/arm/common/amba.c
index c6beb751f2a9..e1013112c354 100644
--- a/arch/arm/common/amba.c
+++ b/arch/arm/common/amba.c
@@ -10,6 +10,8 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/string.h>
14#include <linux/slab.h>
13 15
14#include <asm/io.h> 16#include <asm/io.h>
15#include <asm/irq.h> 17#include <asm/irq.h>
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cbf2165476b0..ad6c89a555bb 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,8 +33,8 @@
33#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
34 34
35#undef DEBUG 35#undef DEBUG
36
37#undef STATS 36#undef STATS
37
38#ifdef STATS 38#ifdef STATS
39#define DO_STATS(X) do { X ; } while (0) 39#define DO_STATS(X) do { X ; } while (0)
40#else 40#else
@@ -52,26 +52,31 @@ struct safe_buffer {
52 int direction; 52 int direction;
53 53
54 /* safe buffer info */ 54 /* safe buffer info */
55 struct dma_pool *pool; 55 struct dmabounce_pool *pool;
56 void *safe; 56 void *safe;
57 dma_addr_t safe_dma_addr; 57 dma_addr_t safe_dma_addr;
58}; 58};
59 59
60struct dmabounce_pool {
61 unsigned long size;
62 struct dma_pool *pool;
63#ifdef STATS
64 unsigned long allocs;
65#endif
66};
67
60struct dmabounce_device_info { 68struct dmabounce_device_info {
61 struct list_head node; 69 struct list_head node;
62 70
63 struct device *dev; 71 struct device *dev;
64 struct dma_pool *small_buffer_pool;
65 struct dma_pool *large_buffer_pool;
66 struct list_head safe_buffers; 72 struct list_head safe_buffers;
67 unsigned long small_buffer_size, large_buffer_size;
68#ifdef STATS 73#ifdef STATS
69 unsigned long sbp_allocs;
70 unsigned long lbp_allocs;
71 unsigned long total_allocs; 74 unsigned long total_allocs;
72 unsigned long map_op_count; 75 unsigned long map_op_count;
73 unsigned long bounce_count; 76 unsigned long bounce_count;
74#endif 77#endif
78 struct dmabounce_pool small;
79 struct dmabounce_pool large;
75}; 80};
76 81
77static LIST_HEAD(dmabounce_devs); 82static LIST_HEAD(dmabounce_devs);
@@ -82,9 +87,9 @@ static void print_alloc_stats(struct dmabounce_device_info *device_info)
82 printk(KERN_INFO 87 printk(KERN_INFO
83 "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n", 88 "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
84 device_info->dev->bus_id, 89 device_info->dev->bus_id,
85 device_info->sbp_allocs, device_info->lbp_allocs, 90 device_info->small.allocs, device_info->large.allocs,
86 device_info->total_allocs - device_info->sbp_allocs - 91 device_info->total_allocs - device_info->small.allocs -
87 device_info->lbp_allocs, 92 device_info->large.allocs,
88 device_info->total_allocs); 93 device_info->total_allocs);
89} 94}
90#endif 95#endif
@@ -106,18 +111,22 @@ find_dmabounce_dev(struct device *dev)
106/* allocate a 'safe' buffer and keep track of it */ 111/* allocate a 'safe' buffer and keep track of it */
107static inline struct safe_buffer * 112static inline struct safe_buffer *
108alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, 113alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
109 size_t size, enum dma_data_direction dir) 114 size_t size, enum dma_data_direction dir)
110{ 115{
111 struct safe_buffer *buf; 116 struct safe_buffer *buf;
112 struct dma_pool *pool; 117 struct dmabounce_pool *pool;
113 struct device *dev = device_info->dev; 118 struct device *dev = device_info->dev;
114 void *safe;
115 dma_addr_t safe_dma_addr;
116 119
117 dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", 120 dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
118 __func__, ptr, size, dir); 121 __func__, ptr, size, dir);
119 122
120 DO_STATS ( device_info->total_allocs++ ); 123 if (size <= device_info->small.size) {
124 pool = &device_info->small;
125 } else if (size <= device_info->large.size) {
126 pool = &device_info->large;
127 } else {
128 pool = NULL;
129 }
121 130
122 buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); 131 buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
123 if (buf == NULL) { 132 if (buf == NULL) {
@@ -125,41 +134,35 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
125 return NULL; 134 return NULL;
126 } 135 }
127 136
128 if (size <= device_info->small_buffer_size) { 137 buf->ptr = ptr;
129 pool = device_info->small_buffer_pool; 138 buf->size = size;
130 safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); 139 buf->direction = dir;
131 140 buf->pool = pool;
132 DO_STATS ( device_info->sbp_allocs++ );
133 } else if (size <= device_info->large_buffer_size) {
134 pool = device_info->large_buffer_pool;
135 safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
136 141
137 DO_STATS ( device_info->lbp_allocs++ ); 142 if (pool) {
143 buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
144 &buf->safe_dma_addr);
138 } else { 145 } else {
139 pool = NULL; 146 buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
140 safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC); 147 GFP_ATOMIC);
141 } 148 }
142 149
143 if (safe == NULL) { 150 if (buf->safe == NULL) {
144 dev_warn(device_info->dev, 151 dev_warn(dev,
145 "%s: could not alloc dma memory (size=%d)\n", 152 "%s: could not alloc dma memory (size=%d)\n",
146 __func__, size); 153 __func__, size);
147 kfree(buf); 154 kfree(buf);
148 return NULL; 155 return NULL;
149 } 156 }
150 157
151#ifdef STATS 158#ifdef STATS
159 if (pool)
160 pool->allocs++;
161 device_info->total_allocs++;
152 if (device_info->total_allocs % 1000 == 0) 162 if (device_info->total_allocs % 1000 == 0)
153 print_alloc_stats(device_info); 163 print_alloc_stats(device_info);
154#endif 164#endif
155 165
156 buf->ptr = ptr;
157 buf->size = size;
158 buf->direction = dir;
159 buf->pool = pool;
160 buf->safe = safe;
161 buf->safe_dma_addr = safe_dma_addr;
162
163 list_add(&buf->node, &device_info->safe_buffers); 166 list_add(&buf->node, &device_info->safe_buffers);
164 167
165 return buf; 168 return buf;
@@ -186,7 +189,7 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
186 list_del(&buf->node); 189 list_del(&buf->node);
187 190
188 if (buf->pool) 191 if (buf->pool)
189 dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr); 192 dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
190 else 193 else
191 dma_free_coherent(device_info->dev, buf->size, buf->safe, 194 dma_free_coherent(device_info->dev, buf->size, buf->safe,
192 buf->safe_dma_addr); 195 buf->safe_dma_addr);
@@ -197,12 +200,10 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
197/* ************************************************** */ 200/* ************************************************** */
198 201
199#ifdef STATS 202#ifdef STATS
200
201static void print_map_stats(struct dmabounce_device_info *device_info) 203static void print_map_stats(struct dmabounce_device_info *device_info)
202{ 204{
203 printk(KERN_INFO 205 dev_info(device_info->dev,
204 "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n", 206 "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
205 device_info->dev->bus_id,
206 device_info->map_op_count, device_info->bounce_count); 207 device_info->map_op_count, device_info->bounce_count);
207} 208}
208#endif 209#endif
@@ -258,13 +259,13 @@ map_single(struct device *dev, void *ptr, size_t size,
258 __func__, ptr, buf->safe, size); 259 __func__, ptr, buf->safe, size);
259 memcpy(buf->safe, ptr, size); 260 memcpy(buf->safe, ptr, size);
260 } 261 }
261 consistent_sync(buf->safe, size, dir); 262 ptr = buf->safe;
262 263
263 dma_addr = buf->safe_dma_addr; 264 dma_addr = buf->safe_dma_addr;
264 } else {
265 consistent_sync(ptr, size, dir);
266 } 265 }
267 266
267 consistent_sync(ptr, size, dir);
268
268 return dma_addr; 269 return dma_addr;
269} 270}
270 271
@@ -278,7 +279,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
278 /* 279 /*
279 * Trying to unmap an invalid mapping 280 * Trying to unmap an invalid mapping
280 */ 281 */
281 if (dma_addr == ~0) { 282 if (dma_mapping_error(dma_addr)) {
282 dev_err(dev, "Trying to unmap invalid mapping\n"); 283 dev_err(dev, "Trying to unmap invalid mapping\n");
283 return; 284 return;
284 } 285 }
@@ -570,11 +571,25 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
570 local_irq_restore(flags); 571 local_irq_restore(flags);
571} 572}
572 573
574static int
575dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
576 unsigned long size)
577{
578 pool->size = size;
579 DO_STATS(pool->allocs = 0);
580 pool->pool = dma_pool_create(name, dev, size,
581 0 /* byte alignment */,
582 0 /* no page-crossing issues */);
583
584 return pool->pool ? 0 : -ENOMEM;
585}
586
573int 587int
574dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, 588dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
575 unsigned long large_buffer_size) 589 unsigned long large_buffer_size)
576{ 590{
577 struct dmabounce_device_info *device_info; 591 struct dmabounce_device_info *device_info;
592 int ret;
578 593
579 device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); 594 device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
580 if (!device_info) { 595 if (!device_info) {
@@ -584,45 +599,31 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
584 return -ENOMEM; 599 return -ENOMEM;
585 } 600 }
586 601
587 device_info->small_buffer_pool = 602 ret = dmabounce_init_pool(&device_info->small, dev,
588 dma_pool_create("small_dmabounce_pool", 603 "small_dmabounce_pool", small_buffer_size);
589 dev, 604 if (ret) {
590 small_buffer_size, 605 dev_err(dev,
591 0 /* byte alignment */, 606 "dmabounce: could not allocate DMA pool for %ld byte objects\n",
592 0 /* no page-crossing issues */); 607 small_buffer_size);
593 if (!device_info->small_buffer_pool) { 608 goto err_free;
594 printk(KERN_ERR
595 "dmabounce: could not allocate small DMA pool for %s\n",
596 dev->bus_id);
597 kfree(device_info);
598 return -ENOMEM;
599 } 609 }
600 610
601 if (large_buffer_size) { 611 if (large_buffer_size) {
602 device_info->large_buffer_pool = 612 ret = dmabounce_init_pool(&device_info->large, dev,
603 dma_pool_create("large_dmabounce_pool", 613 "large_dmabounce_pool",
604 dev, 614 large_buffer_size);
605 large_buffer_size, 615 if (ret) {
606 0 /* byte alignment */, 616 dev_err(dev,
607 0 /* no page-crossing issues */); 617 "dmabounce: could not allocate DMA pool for %ld byte objects\n",
608 if (!device_info->large_buffer_pool) { 618 large_buffer_size);
609 printk(KERN_ERR 619 goto err_destroy;
610 "dmabounce: could not allocate large DMA pool for %s\n",
611 dev->bus_id);
612 dma_pool_destroy(device_info->small_buffer_pool);
613
614 return -ENOMEM;
615 } 620 }
616 } 621 }
617 622
618 device_info->dev = dev; 623 device_info->dev = dev;
619 device_info->small_buffer_size = small_buffer_size;
620 device_info->large_buffer_size = large_buffer_size;
621 INIT_LIST_HEAD(&device_info->safe_buffers); 624 INIT_LIST_HEAD(&device_info->safe_buffers);
622 625
623#ifdef STATS 626#ifdef STATS
624 device_info->sbp_allocs = 0;
625 device_info->lbp_allocs = 0;
626 device_info->total_allocs = 0; 627 device_info->total_allocs = 0;
627 device_info->map_op_count = 0; 628 device_info->map_op_count = 0;
628 device_info->bounce_count = 0; 629 device_info->bounce_count = 0;
@@ -634,6 +635,12 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
634 dev->bus_id, dev->bus->name); 635 dev->bus_id, dev->bus->name);
635 636
636 return 0; 637 return 0;
638
639 err_destroy:
640 dma_pool_destroy(device_info->small.pool);
641 err_free:
642 kfree(device_info);
643 return ret;
637} 644}
638 645
639void 646void
@@ -655,10 +662,10 @@ dmabounce_unregister_dev(struct device *dev)
655 BUG(); 662 BUG();
656 } 663 }
657 664
658 if (device_info->small_buffer_pool) 665 if (device_info->small.pool)
659 dma_pool_destroy(device_info->small_buffer_pool); 666 dma_pool_destroy(device_info->small.pool);
660 if (device_info->large_buffer_pool) 667 if (device_info->large.pool)
661 dma_pool_destroy(device_info->large_buffer_pool); 668 dma_pool_destroy(device_info->large.pool);
662 669
663#ifdef STATS 670#ifdef STATS
664 print_alloc_stats(device_info); 671 print_alloc_stats(device_info);
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index e8356b76d7c6..4af0cf5f3bfb 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -12,6 +12,9 @@
12 */ 12 */
13 13
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/string.h>
16#include <linux/slab.h>
17
15#include <asm/io.h> 18#include <asm/io.h>
16#include <asm/hardware/scoop.h> 19#include <asm/hardware/scoop.h>
17 20
diff --git a/arch/arm/configs/ixdp2400_defconfig b/arch/arm/configs/ixdp2400_defconfig
index 678720fa2e2e..ddeb9f99d662 100644
--- a/arch/arm/configs/ixdp2400_defconfig
+++ b/arch/arm/configs/ixdp2400_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
559# 559#
560CONFIG_SERIAL_8250=y 560CONFIG_SERIAL_8250=y
561CONFIG_SERIAL_8250_CONSOLE=y 561CONFIG_SERIAL_8250_CONSOLE=y
562CONFIG_SERIAL_8250_NR_UARTS=2 562CONFIG_SERIAL_8250_NR_UARTS=1
563# CONFIG_SERIAL_8250_EXTENDED is not set 563# CONFIG_SERIAL_8250_EXTENDED is not set
564 564
565# 565#
diff --git a/arch/arm/configs/ixdp2800_defconfig b/arch/arm/configs/ixdp2800_defconfig
index 261e2343903b..81d3a0606f95 100644
--- a/arch/arm/configs/ixdp2800_defconfig
+++ b/arch/arm/configs/ixdp2800_defconfig
@@ -559,7 +559,7 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
559# 559#
560CONFIG_SERIAL_8250=y 560CONFIG_SERIAL_8250=y
561CONFIG_SERIAL_8250_CONSOLE=y 561CONFIG_SERIAL_8250_CONSOLE=y
562CONFIG_SERIAL_8250_NR_UARTS=2 562CONFIG_SERIAL_8250_NR_UARTS=1
563# CONFIG_SERIAL_8250_EXTENDED is not set 563# CONFIG_SERIAL_8250_EXTENDED is not set
564 564
565# 565#
diff --git a/arch/arm/kernel/arthur.c b/arch/arm/kernel/arthur.c
index a418dad6692c..0ee2e9819631 100644
--- a/arch/arm/kernel/arthur.c
+++ b/arch/arm/kernel/arthur.c
@@ -18,6 +18,7 @@
18#include <linux/stddef.h> 18#include <linux/stddef.h>
19#include <linux/signal.h> 19#include <linux/signal.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/sched.h>
21 22
22#include <asm/ptrace.h> 23#include <asm/ptrace.h>
23 24
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index cd99b83f14c2..9bd8609a2926 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -782,7 +782,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
782 return ret; 782 return ret;
783} 783}
784 784
785asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 785asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
786{ 786{
787 struct task_struct *child; 787 struct task_struct *child;
788 int ret; 788 int ret;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 69449a818dcc..fc4729106a32 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -36,10 +36,6 @@
36#include <asm/thread_info.h> 36#include <asm/thread_info.h>
37#include <asm/mach/time.h> 37#include <asm/mach/time.h>
38 38
39u64 jiffies_64 = INITIAL_JIFFIES;
40
41EXPORT_SYMBOL(jiffies_64);
42
43/* 39/*
44 * Our system timer. 40 * Our system timer.
45 */ 41 */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 66e5a0516f23..45e9ea6cd2a5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -198,25 +198,16 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
198 barrier(); 198 barrier();
199} 199}
200 200
201DEFINE_SPINLOCK(die_lock); 201static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
202
203/*
204 * This function is protected against re-entrancy.
205 */
206NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
207{ 202{
208 struct task_struct *tsk = current; 203 struct task_struct *tsk = thread->task;
209 static int die_counter; 204 static int die_counter;
210 205
211 console_verbose();
212 spin_lock_irq(&die_lock);
213 bust_spinlocks(1);
214
215 printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter); 206 printk("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
216 print_modules(); 207 print_modules();
217 __show_regs(regs); 208 __show_regs(regs);
218 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 209 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
219 tsk->comm, tsk->pid, tsk->thread_info + 1); 210 tsk->comm, tsk->pid, thread + 1);
220 211
221 if (!user_mode(regs) || in_interrupt()) { 212 if (!user_mode(regs) || in_interrupt()) {
222 dump_mem("Stack: ", regs->ARM_sp, 213 dump_mem("Stack: ", regs->ARM_sp,
@@ -224,7 +215,21 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
224 dump_backtrace(regs, tsk); 215 dump_backtrace(regs, tsk);
225 dump_instr(regs); 216 dump_instr(regs);
226 } 217 }
218}
227 219
220DEFINE_SPINLOCK(die_lock);
221
222/*
223 * This function is protected against re-entrancy.
224 */
225NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
226{
227 struct thread_info *thread = current_thread_info();
228
229 console_verbose();
230 spin_lock_irq(&die_lock);
231 bust_spinlocks(1);
232 __die(str, err, thread, regs);
228 bust_spinlocks(0); 233 bust_spinlocks(0);
229 spin_unlock_irq(&die_lock); 234 spin_unlock_irq(&die_lock);
230 do_exit(SIGSEGV); 235 do_exit(SIGSEGV);
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
new file mode 100644
index 000000000000..561e20717b30
--- /dev/null
+++ b/arch/arm/lib/ashldi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__ashldi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi ah, ah, lsl r2
44 movpl ah, al, lsl r3
45 orrmi ah, ah, al, lsr ip
46 mov al, al, lsl r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/ashldi3.c b/arch/arm/lib/ashldi3.c
deleted file mode 100644
index b62875cfd8f8..000000000000
--- a/arch/arm/lib/ashldi3.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __ashldi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 w.s.low = 0;
48 w.s.high = (u32) uu.s.low << -bm;
49 } else {
50 u32 carries = (u32) uu.s.low >> bm;
51 w.s.low = (u32) uu.s.low << b;
52 w.s.high = ((u32) uu.s.high << b) | carries;
53 }
54
55 return w.ll;
56}
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
new file mode 100644
index 000000000000..86fb2a90c301
--- /dev/null
+++ b/arch/arm/lib/ashrdi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__ashrdi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi al, al, lsr r2
44 movpl al, ah, asr r3
45 orrmi al, al, ah, lsl ip
46 mov ah, ah, asr r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/ashrdi3.c b/arch/arm/lib/ashrdi3.c
deleted file mode 100644
index 9a8600a7543f..000000000000
--- a/arch/arm/lib/ashrdi3.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __ashrdi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 /* w.s.high = 1..1 or 0..0 */
48 w.s.high = uu.s.high >> (sizeof(s32) * BITS_PER_UNIT - 1);
49 w.s.low = uu.s.high >> -bm;
50 } else {
51 u32 carries = (u32) uu.s.high << bm;
52 w.s.high = uu.s.high >> b;
53 w.s.low = ((u32) uu.s.low >> b) | carries;
54 }
55
56 return w.ll;
57}
diff --git a/arch/arm/lib/gcclib.h b/arch/arm/lib/gcclib.h
deleted file mode 100644
index 8b6dcc656de7..000000000000
--- a/arch/arm/lib/gcclib.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */
2/* I Molton 29/07/01 */
3
4#include <linux/types.h>
5
6#define BITS_PER_UNIT 8
7#define SI_TYPE_SIZE (sizeof(s32) * BITS_PER_UNIT)
8
9#ifdef __ARMEB__
10struct DIstruct {
11 s32 high, low;
12};
13#else
14struct DIstruct {
15 s32 low, high;
16};
17#endif
18
19typedef union {
20 struct DIstruct s;
21 s64 ll;
22} DIunion;
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
new file mode 100644
index 000000000000..46c2ed19ec95
--- /dev/null
+++ b/arch/arm/lib/lshrdi3.S
@@ -0,0 +1,48 @@
1/* Copyright 1995, 1996, 1998, 1999, 2000, 2003, 2004, 2005
2 Free Software Foundation, Inc.
3
4This file is free software; you can redistribute it and/or modify it
5under the terms of the GNU General Public License as published by the
6Free Software Foundation; either version 2, or (at your option) any
7later version.
8
9In addition to the permissions in the GNU General Public License, the
10Free Software Foundation gives you unlimited permission to link the
11compiled version of this file into combinations with other programs,
12and to distribute those combinations without any restriction coming
13from the use of this file. (The General Public License restrictions
14do apply in other respects; for example, they cover modification of
15the file, and distribution when not linked into a combine
16executable.)
17
18This file is distributed in the hope that it will be useful, but
19WITHOUT ANY WARRANTY; without even the implied warranty of
20MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21General Public License for more details.
22
23You should have received a copy of the GNU General Public License
24along with this program; see the file COPYING. If not, write to
25the Free Software Foundation, 51 Franklin Street, Fifth Floor,
26Boston, MA 02110-1301, USA. */
27
28
29#include <linux/linkage.h>
30
31#ifdef __ARMEB__
32#define al r1
33#define ah r0
34#else
35#define al r0
36#define ah r1
37#endif
38
39ENTRY(__lshrdi3)
40
41 subs r3, r2, #32
42 rsb ip, r2, #32
43 movmi al, al, lsr r2
44 movpl al, ah, lsr r3
45 orrmi al, al, ah, lsl ip
46 mov ah, ah, lsr r2
47 mov pc, lr
48
diff --git a/arch/arm/lib/lshrdi3.c b/arch/arm/lib/lshrdi3.c
deleted file mode 100644
index 3681f49d2b6e..000000000000
--- a/arch/arm/lib/lshrdi3.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34s64 __lshrdi3(s64 u, int b)
35{
36 DIunion w;
37 int bm;
38 DIunion uu;
39
40 if (b == 0)
41 return u;
42
43 uu.ll = u;
44
45 bm = (sizeof(s32) * BITS_PER_UNIT) - b;
46 if (bm <= 0) {
47 w.s.high = 0;
48 w.s.low = (u32) uu.s.high >> -bm;
49 } else {
50 u32 carries = (u32) uu.s.high << bm;
51 w.s.high = (u32) uu.s.high >> b;
52 w.s.low = ((u32) uu.s.low >> b) | carries;
53 }
54
55 return w.ll;
56}
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
new file mode 100644
index 000000000000..c7fbdf005319
--- /dev/null
+++ b/arch/arm/lib/muldi3.S
@@ -0,0 +1,44 @@
1/*
2 * linux/arch/arm/lib/muldi3.S
3 *
4 * Author: Nicolas Pitre
5 * Created: Oct 19, 2005
6 * Copyright: Monta Vista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14
15#ifdef __ARMEB__
16#define xh r0
17#define xl r1
18#define yh r2
19#define yl r3
20#else
21#define xl r0
22#define xh r1
23#define yl r2
24#define yh r3
25#endif
26
27ENTRY(__muldi3)
28
29 mul xh, yl, xh
30 mla xh, xl, yh, xh
31 mov ip, xl, asr #16
32 mov yh, yl, asr #16
33 bic xl, xl, ip, lsl #16
34 bic yl, yl, yh, lsl #16
35 mla xh, yh, ip, xh
36 mul yh, xl, yh
37 mul xl, yl, xl
38 mul ip, yl, ip
39 adds xl, xl, yh, lsl #16
40 adc xh, xh, yh, lsr #16
41 adds xl, xl, ip, lsl #16
42 adc xh, xh, ip, lsr #16
43 mov pc, lr
44
diff --git a/arch/arm/lib/muldi3.c b/arch/arm/lib/muldi3.c
deleted file mode 100644
index 0a3b93313f18..000000000000
--- a/arch/arm/lib/muldi3.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34#define umul_ppmm(xh, xl, a, b) \
35{register u32 __t0, __t1, __t2; \
36 __asm__ ("%@ Inlined umul_ppmm \n\
37 mov %2, %5, lsr #16 \n\
38 mov %0, %6, lsr #16 \n\
39 bic %3, %5, %2, lsl #16 \n\
40 bic %4, %6, %0, lsl #16 \n\
41 mul %1, %3, %4 \n\
42 mul %4, %2, %4 \n\
43 mul %3, %0, %3 \n\
44 mul %0, %2, %0 \n\
45 adds %3, %4, %3 \n\
46 addcs %0, %0, #65536 \n\
47 adds %1, %1, %3, lsl #16 \n\
48 adc %0, %0, %3, lsr #16" \
49 : "=&r" ((u32) (xh)), \
50 "=r" ((u32) (xl)), \
51 "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \
52 : "r" ((u32) (a)), \
53 "r" ((u32) (b)));}
54
55#define __umulsidi3(u, v) \
56 ({DIunion __w; \
57 umul_ppmm (__w.s.high, __w.s.low, u, v); \
58 __w.ll; })
59
60s64 __muldi3(s64 u, s64 v)
61{
62 DIunion w;
63 DIunion uu, vv;
64
65 uu.ll = u, vv.ll = v;
66
67 w.ll = __umulsidi3(uu.s.low, vv.s.low);
68 w.s.high += ((u32) uu.s.low * (u32) vv.s.high
69 + (u32) uu.s.high * (u32) vv.s.low);
70
71 return w.ll;
72}
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
new file mode 100644
index 000000000000..112630f93e5d
--- /dev/null
+++ b/arch/arm/lib/ucmpdi2.S
@@ -0,0 +1,35 @@
1/*
2 * linux/arch/arm/lib/ucmpdi2.S
3 *
4 * Author: Nicolas Pitre
5 * Created: Oct 19, 2005
6 * Copyright: Monta Vista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14
15#ifdef __ARMEB__
16#define xh r0
17#define xl r1
18#define yh r2
19#define yl r3
20#else
21#define xl r0
22#define xh r1
23#define yl r2
24#define yh r3
25#endif
26
27ENTRY(__ucmpdi2)
28
29 cmp xh, yh
30 cmpeq xl, yl
31 movlo r0, #0
32 moveq r0, #1
33 movhi r0, #2
34 mov pc, lr
35
diff --git a/arch/arm/lib/ucmpdi2.c b/arch/arm/lib/ucmpdi2.c
deleted file mode 100644
index 57f3f2df3850..000000000000
--- a/arch/arm/lib/ucmpdi2.c
+++ /dev/null
@@ -1,49 +0,0 @@
1/* More subroutines needed by GCC output code on some machines. */
2/* Compile this one with gcc. */
3/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
4
5This file is part of GNU CC.
6
7GNU CC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 2, or (at your option)
10any later version.
11
12GNU CC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */
21
22/* As a special exception, if you link this library with other files,
23 some of which are compiled with GCC, to produce an executable,
24 this library does not by itself cause the resulting executable
25 to be covered by the GNU General Public License.
26 This exception does not however invalidate any other reasons why
27 the executable file might be covered by the GNU General Public License.
28 */
29/* support functions required by the kernel. based on code from gcc-2.95.3 */
30/* I Molton 29/07/01 */
31
32#include "gcclib.h"
33
34int __ucmpdi2(s64 a, s64 b)
35{
36 DIunion au, bu;
37
38 au.ll = a, bu.ll = b;
39
40 if ((u32) au.s.high < (u32) bu.s.high)
41 return 0;
42 else if ((u32) au.s.high > (u32) bu.s.high)
43 return 2;
44 if ((u32) au.s.low < (u32) bu.s.low)
45 return 0;
46 else if ((u32) au.s.low > (u32) bu.s.low)
47 return 2;
48 return 1;
49}
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index cb14b0682cef..837d7f0bda4c 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -26,6 +26,8 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/string.h>
30
29#include <asm/arch/imxfb.h> 31#include <asm/arch/imxfb.h>
30#include <asm/hardware.h> 32#include <asm/hardware.h>
31#include <asm/arch/imx-regs.h> 33#include <asm/arch/imx-regs.h>
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/mach-integrator/clock.c
index 56200594db3c..73c360685cad 100644
--- a/arch/arm/mach-integrator/clock.c
+++ b/arch/arm/mach-integrator/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/semaphore.h> 18#include <asm/semaphore.h>
18#include <asm/hardware/clock.h> 19#include <asm/hardware/clock.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index f368b85f0447..764ceb49470a 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -30,6 +30,7 @@
30#include <asm/io.h> 30#include <asm/io.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/setup.h> 32#include <asm/setup.h>
33#include <asm/param.h> /* HZ */
33#include <asm/mach-types.h> 34#include <asm/mach-types.h>
34#include <asm/hardware/amba.h> 35#include <asm/hardware/amba.h>
35#include <asm/hardware/amba_kmi.h> 36#include <asm/hardware/amba_kmi.h>
diff --git a/arch/arm/mach-integrator/lm.c b/arch/arm/mach-integrator/lm.c
index c5f19d160598..5b41e3a724e1 100644
--- a/arch/arm/mach-integrator/lm.c
+++ b/arch/arm/mach-integrator/lm.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/slab.h>
13 14
14#include <asm/arch/lm.h> 15#include <asm/arch/lm.h>
15 16
diff --git a/arch/arm/mach-iop3xx/iq31244-pci.c b/arch/arm/mach-iop3xx/iq31244-pci.c
index f997daa800bf..c6a973ba8fc6 100644
--- a/arch/arm/mach-iop3xx/iq31244-pci.c
+++ b/arch/arm/mach-iop3xx/iq31244-pci.c
@@ -14,6 +14,8 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/slab.h>
17 19
18#include <asm/hardware.h> 20#include <asm/hardware.h>
19#include <asm/irq.h> 21#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80321-pci.c b/arch/arm/mach-iop3xx/iq80321-pci.c
index 79fea3d20b66..802f6d091b75 100644
--- a/arch/arm/mach-iop3xx/iq80321-pci.c
+++ b/arch/arm/mach-iop3xx/iq80321-pci.c
@@ -14,6 +14,8 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/slab.h>
17 19
18#include <asm/hardware.h> 20#include <asm/hardware.h>
19#include <asm/irq.h> 21#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80331-pci.c b/arch/arm/mach-iop3xx/iq80331-pci.c
index f37a0e26b466..654e450a1311 100644
--- a/arch/arm/mach-iop3xx/iq80331-pci.c
+++ b/arch/arm/mach-iop3xx/iq80331-pci.c
@@ -13,6 +13,8 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/string.h>
17#include <linux/slab.h>
16 18
17#include <asm/hardware.h> 19#include <asm/hardware.h>
18#include <asm/irq.h> 20#include <asm/irq.h>
diff --git a/arch/arm/mach-iop3xx/iq80332-pci.c b/arch/arm/mach-iop3xx/iq80332-pci.c
index b9807aa2aade..65951ffe4631 100644
--- a/arch/arm/mach-iop3xx/iq80332-pci.c
+++ b/arch/arm/mach-iop3xx/iq80332-pci.c
@@ -13,6 +13,8 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/string.h>
17#include <linux/slab.h>
16 18
17#include <asm/hardware.h> 19#include <asm/hardware.h>
18#include <asm/irq.h> 20#include <asm/irq.h>
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 60c8b9d8bb9c..656f73bbcb5a 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -33,6 +33,7 @@
33 33
34#include <asm/arch/pxa-regs.h> 34#include <asm/arch/pxa-regs.h>
35#include <asm/arch/irq.h> 35#include <asm/arch/irq.h>
36#include <asm/arch/irda.h>
36#include <asm/arch/mmc.h> 37#include <asm/arch/mmc.h>
37#include <asm/arch/udc.h> 38#include <asm/arch/udc.h>
38#include <asm/arch/corgi.h> 39#include <asm/arch/corgi.h>
@@ -224,6 +225,22 @@ static struct pxamci_platform_data corgi_mci_platform_data = {
224}; 225};
225 226
226 227
228/*
229 * Irda
230 */
231static void corgi_irda_transceiver_mode(struct device *dev, int mode)
232{
233 if (mode & IR_OFF)
234 GPSR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
235 else
236 GPCR(CORGI_GPIO_IR_ON) = GPIO_bit(CORGI_GPIO_IR_ON);
237}
238
239static struct pxaficp_platform_data corgi_ficp_platform_data = {
240 .transceiver_cap = IR_SIRMODE | IR_OFF,
241 .transceiver_mode = corgi_irda_transceiver_mode,
242};
243
227 244
228/* 245/*
229 * USB Device Controller 246 * USB Device Controller
@@ -269,10 +286,13 @@ static void __init corgi_init(void)
269 286
270 corgi_ssp_set_machinfo(&corgi_ssp_machinfo); 287 corgi_ssp_set_machinfo(&corgi_ssp_machinfo);
271 288
289 pxa_gpio_mode(CORGI_GPIO_IR_ON | GPIO_OUT);
272 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT); 290 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT);
273 pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN); 291 pxa_gpio_mode(CORGI_GPIO_HSYNC | GPIO_IN);
292
274 pxa_set_udc_info(&udc_info); 293 pxa_set_udc_info(&udc_info);
275 pxa_set_mci_info(&corgi_mci_platform_data); 294 pxa_set_mci_info(&corgi_mci_platform_data);
295 pxa_set_ficp_info(&corgi_ficp_platform_data);
276 296
277 scoop_num = 1; 297 scoop_num = 1;
278 scoop_devs = &corgi_pcmcia_scoop[0]; 298 scoop_devs = &corgi_pcmcia_scoop[0];
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 3248bc9b9495..9c0289333301 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -23,6 +23,7 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/pm.h> 25#include <linux/pm.h>
26#include <linux/string.h>
26 27
27#include <asm/hardware.h> 28#include <asm/hardware.h>
28#include <asm/irq.h> 29#include <asm/irq.h>
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index f25638810017..6d413f6701a7 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -32,6 +32,7 @@
32#include <asm/arch/irq.h> 32#include <asm/arch/irq.h>
33#include <asm/arch/mmc.h> 33#include <asm/arch/mmc.h>
34#include <asm/arch/udc.h> 34#include <asm/arch/udc.h>
35#include <asm/arch/irda.h>
35#include <asm/arch/poodle.h> 36#include <asm/arch/poodle.h>
36#include <asm/arch/pxafb.h> 37#include <asm/arch/pxafb.h>
37 38
@@ -152,6 +153,24 @@ static struct pxamci_platform_data poodle_mci_platform_data = {
152 153
153 154
154/* 155/*
156 * Irda
157 */
158static void poodle_irda_transceiver_mode(struct device *dev, int mode)
159{
160 if (mode & IR_OFF) {
161 GPSR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON);
162 } else {
163 GPCR(POODLE_GPIO_IR_ON) = GPIO_bit(POODLE_GPIO_IR_ON);
164 }
165}
166
167static struct pxaficp_platform_data poodle_ficp_platform_data = {
168 .transceiver_cap = IR_SIRMODE | IR_OFF,
169 .transceiver_mode = poodle_irda_transceiver_mode,
170};
171
172
173/*
155 * USB Device Controller 174 * USB Device Controller
156 */ 175 */
157static void poodle_udc_command(int cmd) 176static void poodle_udc_command(int cmd)
@@ -244,8 +263,10 @@ static void __init poodle_init(void)
244 263
245 set_pxa_fb_info(&poodle_fb_info); 264 set_pxa_fb_info(&poodle_fb_info);
246 pxa_gpio_mode(POODLE_GPIO_USB_PULLUP | GPIO_OUT); 265 pxa_gpio_mode(POODLE_GPIO_USB_PULLUP | GPIO_OUT);
266 pxa_gpio_mode(POODLE_GPIO_IR_ON | GPIO_OUT);
247 pxa_set_udc_info(&udc_info); 267 pxa_set_udc_info(&udc_info);
248 pxa_set_mci_info(&poodle_mci_platform_data); 268 pxa_set_mci_info(&poodle_mci_platform_data);
269 pxa_set_ficp_info(&poodle_ficp_platform_data);
249 270
250 scoop_num = 1; 271 scoop_num = 1;
251 scoop_devs = &poodle_pcmcia_scoop[0]; 272 scoop_devs = &poodle_pcmcia_scoop[0];
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index d0ab428c2d7d..b838842b6a20 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -34,6 +34,7 @@
34 34
35#include <asm/arch/pxa-regs.h> 35#include <asm/arch/pxa-regs.h>
36#include <asm/arch/irq.h> 36#include <asm/arch/irq.h>
37#include <asm/arch/irda.h>
37#include <asm/arch/mmc.h> 38#include <asm/arch/mmc.h>
38#include <asm/arch/udc.h> 39#include <asm/arch/udc.h>
39#include <asm/arch/pxafb.h> 40#include <asm/arch/pxafb.h>
@@ -277,6 +278,23 @@ static struct pxamci_platform_data spitz_mci_platform_data = {
277 278
278 279
279/* 280/*
281 * Irda
282 */
283static void spitz_irda_transceiver_mode(struct device *dev, int mode)
284{
285 if (mode & IR_OFF)
286 set_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
287 else
288 reset_scoop_gpio(&spitzscoop2_device.dev, SPITZ_SCP2_IR_ON);
289}
290
291static struct pxaficp_platform_data spitz_ficp_platform_data = {
292 .transceiver_cap = IR_SIRMODE | IR_OFF,
293 .transceiver_mode = spitz_irda_transceiver_mode,
294};
295
296
297/*
280 * Spitz PXA Framebuffer 298 * Spitz PXA Framebuffer
281 */ 299 */
282static struct pxafb_mach_info spitz_pxafb_info __initdata = { 300static struct pxafb_mach_info spitz_pxafb_info __initdata = {
@@ -326,6 +344,7 @@ static void __init common_init(void)
326 344
327 platform_add_devices(devices, ARRAY_SIZE(devices)); 345 platform_add_devices(devices, ARRAY_SIZE(devices));
328 pxa_set_mci_info(&spitz_mci_platform_data); 346 pxa_set_mci_info(&spitz_mci_platform_data);
347 pxa_set_ficp_info(&spitz_ficp_platform_data);
329 set_pxa_fb_parent(&spitzssp_device.dev); 348 set_pxa_fb_parent(&spitzssp_device.dev);
330 set_pxa_fb_info(&spitz_pxafb_info); 349 set_pxa_fb_info(&spitz_pxafb_info);
331} 350}
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index f94b0fbcdcc8..83eba8b54816 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -17,6 +17,7 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/sched.h> /* just for sched_clock() - funny that */
20 21
21#include <asm/div64.h> 22#include <asm/div64.h>
22#include <asm/hardware.h> 23#include <asm/hardware.h>
diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c
index 48025c2b9987..b96a2ea15d41 100644
--- a/arch/arm/mach-versatile/clock.c
+++ b/arch/arm/mach-versatile/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/semaphore.h> 18#include <asm/semaphore.h>
18#include <asm/hardware/clock.h> 19#include <asm/hardware/clock.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea7..269ce6913ee9 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
22#endif 22#endif
23 23
24#define from_address (0xffff8000) 24#define from_address (0xffff8000)
25#define from_pgprot PAGE_KERNEL
26#define to_address (0xffffc000) 25#define to_address (0xffffc000)
27#define to_pgprot PAGE_KERNEL
28 26
29#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) 27#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
30 28
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
34 * Copy the user page. No aliasing to deal with so we can just 32 * Copy the user page. No aliasing to deal with so we can just
35 * attack the kernel's existing mapping of these pages. 33 * attack the kernel's existing mapping of these pages.
36 */ 34 */
37void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr) 35static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
38{ 36{
39 copy_page(kto, kfrom); 37 copy_page(kto, kfrom);
40} 38}
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long v
43 * Clear the user page. No aliasing to deal with so we can just 41 * Clear the user page. No aliasing to deal with so we can just
44 * attack the kernel's existing mapping of this page. 42 * attack the kernel's existing mapping of this page.
45 */ 43 */
46void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) 44static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
47{ 45{
48 clear_page(kaddr); 46 clear_page(kaddr);
49} 47}
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
51/* 49/*
52 * Copy the page, taking account of the cache colour. 50 * Copy the page, taking account of the cache colour.
53 */ 51 */
54void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr) 52static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
55{ 53{
56 unsigned int offset = CACHE_COLOUR(vaddr); 54 unsigned int offset = CACHE_COLOUR(vaddr);
57 unsigned long from, to; 55 unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
72 */ 70 */
73 spin_lock(&v6_lock); 71 spin_lock(&v6_lock);
74 72
75 set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot)); 73 set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
76 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot)); 74 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
77 75
78 from = from_address + (offset << PAGE_SHIFT); 76 from = from_address + (offset << PAGE_SHIFT);
79 to = to_address + (offset << PAGE_SHIFT); 77 to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
91 * so remap the kernel page into the same cache colour as the user 89 * so remap the kernel page into the same cache colour as the user
92 * page. 90 * page.
93 */ 91 */
94void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) 92static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
95{ 93{
96 unsigned int offset = CACHE_COLOUR(vaddr); 94 unsigned int offset = CACHE_COLOUR(vaddr);
97 unsigned long to = to_address + (offset << PAGE_SHIFT); 95 unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
112 */ 110 */
113 spin_lock(&v6_lock); 111 spin_lock(&v6_lock);
114 112
115 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot)); 113 set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
116 flush_tlb_kernel_page(to); 114 flush_tlb_kernel_page(to);
117 clear_page((void *)to); 115 clear_page((void *)to);
118 116
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 52a58b2da288..a020fe16428f 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/string.h>
16 17
17#include <asm/io.h> 18#include <asm/io.h>
18#include <asm/semaphore.h> 19#include <asm/semaphore.h>
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 8a52124de0e1..cf7e977d18c8 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -665,7 +665,7 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
665 return ret; 665 return ret;
666} 666}
667 667
668asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 668asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
669{ 669{
670 struct task_struct *child; 670 struct task_struct *child;
671 int ret; 671 int ret;
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
index e66aedd02fad..335525339ad6 100644
--- a/arch/arm26/kernel/time.c
+++ b/arch/arm26/kernel/time.c
@@ -34,10 +34,6 @@
34#include <asm/irq.h> 34#include <asm/irq.h>
35#include <asm/ioc.h> 35#include <asm/ioc.h>
36 36
37u64 jiffies_64 = INITIAL_JIFFIES;
38
39EXPORT_SYMBOL(jiffies_64);
40
41extern unsigned long wall_jiffies; 37extern unsigned long wall_jiffies;
42 38
43/* this needs a better home */ 39/* this needs a better home */
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 11ab3836aac6..56b038c8d482 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -140,6 +140,7 @@
140#include <linux/kernel.h> 140#include <linux/kernel.h>
141#include <linux/config.h> 141#include <linux/config.h>
142#include <linux/init.h> 142#include <linux/init.h>
143#include <linux/slab.h>
143 144
144#include <linux/mtd/concat.h> 145#include <linux/mtd/concat.h>
145#include <linux/mtd/map.h> 146#include <linux/mtd/map.h>
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index 78ed52b1cdac..b679f983b90a 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -20,6 +20,7 @@
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/config.h> 21#include <linux/config.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/slab.h>
23 24
24#include <linux/mtd/concat.h> 25#include <linux/mtd/concat.h>
25#include <linux/mtd/map.h> 26#include <linux/mtd/map.h>
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index a2d99b4aedcd..66ba8898db07 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -31,10 +31,7 @@
31#include <linux/timex.h> 31#include <linux/timex.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/profile.h> 33#include <linux/profile.h>
34 34#include <linux/sched.h> /* just for sched_clock() - funny that */
35u64 jiffies_64 = INITIAL_JIFFIES;
36
37EXPORT_SYMBOL(jiffies_64);
38 35
39int have_rtc; /* used to remember if we have an RTC or not */; 36int have_rtc; /* used to remember if we have an RTC or not */;
40 37
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index cbe03cba9f02..cb335a14a315 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -106,7 +106,7 @@ void ptrace_enable(struct task_struct *child)
106 child->thread.frame0->__status |= REG__STATUS_STEP; 106 child->thread.frame0->__status |= REG__STATUS_STEP;
107} 107}
108 108
109asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 109asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
110{ 110{
111 struct task_struct *child; 111 struct task_struct *child;
112 unsigned long tmp; 112 unsigned long tmp;
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index f43b734482e3..2e9741227b73 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -34,9 +34,6 @@
34 34
35extern unsigned long wall_jiffies; 35extern unsigned long wall_jiffies;
36 36
37u64 jiffies_64 = INITIAL_JIFFIES;
38EXPORT_SYMBOL(jiffies_64);
39
40unsigned long __nongprelbss __clkin_clock_speed_HZ; 37unsigned long __nongprelbss __clkin_clock_speed_HZ;
41unsigned long __nongprelbss __ext_bus_clock_speed_HZ; 38unsigned long __nongprelbss __ext_bus_clock_speed_HZ;
42unsigned long __nongprelbss __res_bus_clock_speed_HZ; 39unsigned long __nongprelbss __res_bus_clock_speed_HZ;
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 05c15e869777..a569fe4aa284 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -57,7 +57,7 @@ void ptrace_disable(struct task_struct *child)
57 h8300_disable_trace(child); 57 h8300_disable_trace(child);
58} 58}
59 59
60asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 60asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
61{ 61{
62 struct task_struct *child; 62 struct task_struct *child;
63 int ret; 63 int ret;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index af8c5d2057dd..688a5100604c 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -32,10 +32,6 @@
32 32
33#define TICK_SIZE (tick_nsec / 1000) 33#define TICK_SIZE (tick_nsec / 1000)
34 34
35u64 jiffies_64;
36
37EXPORT_SYMBOL(jiffies_64);
38
39/* 35/*
40 * timer_interrupt() needs to keep up the real-time clock, 36 * timer_interrupt() needs to keep up the real-time clock,
41 * as well as call the "do_timer()" routine every clocktick 37 * as well as call the "do_timer()" routine every clocktick
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d2703cda61ea..5383e5e2d9b7 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -5,7 +5,7 @@
5 5
6mainmenu "Linux Kernel Configuration" 6mainmenu "Linux Kernel Configuration"
7 7
8config X86 8config X86_32
9 bool 9 bool
10 default y 10 default y
11 help 11 help
@@ -18,6 +18,10 @@ config SEMAPHORE_SLEEPERS
18 bool 18 bool
19 default y 19 default y
20 20
21config X86
22 bool
23 default y
24
21config MMU 25config MMU
22 bool 26 bool
23 default y 27 default y
@@ -151,304 +155,7 @@ config ES7000_CLUSTERED_APIC
151 default y 155 default y
152 depends on SMP && X86_ES7000 && MPENTIUMIII 156 depends on SMP && X86_ES7000 && MPENTIUMIII
153 157
154if !X86_ELAN 158source "arch/i386/Kconfig.cpu"
155
156choice
157 prompt "Processor family"
158 default M686
159
160config M386
161 bool "386"
162 ---help---
163 This is the processor type of your CPU. This information is used for
164 optimizing purposes. In order to compile a kernel that can run on
165 all x86 CPU types (albeit not optimally fast), you can specify
166 "386" here.
167
168 The kernel will not necessarily run on earlier architectures than
169 the one you have chosen, e.g. a Pentium optimized kernel will run on
170 a PPro, but not necessarily on a i486.
171
172 Here are the settings recommended for greatest speed:
173 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
174 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
175 will run on a 386 class machine.
176 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
177 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
178 - "586" for generic Pentium CPUs lacking the TSC
179 (time stamp counter) register.
180 - "Pentium-Classic" for the Intel Pentium.
181 - "Pentium-MMX" for the Intel Pentium MMX.
182 - "Pentium-Pro" for the Intel Pentium Pro.
183 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
184 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
185 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
186 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
187 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
188 - "Crusoe" for the Transmeta Crusoe series.
189 - "Efficeon" for the Transmeta Efficeon series.
190 - "Winchip-C6" for original IDT Winchip.
191 - "Winchip-2" for IDT Winchip 2.
192 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
193 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
194 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
195 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
196
197 If you don't know what to do, choose "386".
198
199config M486
200 bool "486"
201 help
202 Select this for a 486 series processor, either Intel or one of the
203 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
204 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
205 U5S.
206
207config M586
208 bool "586/K5/5x86/6x86/6x86MX"
209 help
210 Select this for an 586 or 686 series processor such as the AMD K5,
211 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
212 assume the RDTSC (Read Time Stamp Counter) instruction.
213
214config M586TSC
215 bool "Pentium-Classic"
216 help
217 Select this for a Pentium Classic processor with the RDTSC (Read
218 Time Stamp Counter) instruction for benchmarking.
219
220config M586MMX
221 bool "Pentium-MMX"
222 help
223 Select this for a Pentium with the MMX graphics/multimedia
224 extended instructions.
225
226config M686
227 bool "Pentium-Pro"
228 help
229 Select this for Intel Pentium Pro chips. This enables the use of
230 Pentium Pro extended instructions, and disables the init-time guard
231 against the f00f bug found in earlier Pentiums.
232
233config MPENTIUMII
234 bool "Pentium-II/Celeron(pre-Coppermine)"
235 help
236 Select this for Intel chips based on the Pentium-II and
237 pre-Coppermine Celeron core. This option enables an unaligned
238 copy optimization, compiles the kernel with optimization flags
239 tailored for the chip, and applies any applicable Pentium Pro
240 optimizations.
241
242config MPENTIUMIII
243 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
244 help
245 Select this for Intel chips based on the Pentium-III and
246 Celeron-Coppermine core. This option enables use of some
247 extended prefetch instructions in addition to the Pentium II
248 extensions.
249
250config MPENTIUMM
251 bool "Pentium M"
252 help
253 Select this for Intel Pentium M (not Pentium-4 M)
254 notebook chips.
255
256config MPENTIUM4
257 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
258 help
259 Select this for Intel Pentium 4 chips. This includes the
260 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
261 (not Pentium M) chips. This option enables compile flags
262 optimized for the chip, uses the correct cache shift, and
263 applies any applicable Pentium III optimizations.
264
265config MK6
266 bool "K6/K6-II/K6-III"
267 help
268 Select this for an AMD K6-family processor. Enables use of
269 some extended instructions, and passes appropriate optimization
270 flags to GCC.
271
272config MK7
273 bool "Athlon/Duron/K7"
274 help
275 Select this for an AMD Athlon K7-family processor. Enables use of
276 some extended instructions, and passes appropriate optimization
277 flags to GCC.
278
279config MK8
280 bool "Opteron/Athlon64/Hammer/K8"
281 help
282 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
283 use of some extended instructions, and passes appropriate optimization
284 flags to GCC.
285
286config MCRUSOE
287 bool "Crusoe"
288 help
289 Select this for a Transmeta Crusoe processor. Treats the processor
290 like a 586 with TSC, and sets some GCC optimization flags (like a
291 Pentium Pro with no alignment requirements).
292
293config MEFFICEON
294 bool "Efficeon"
295 help
296 Select this for a Transmeta Efficeon processor.
297
298config MWINCHIPC6
299 bool "Winchip-C6"
300 help
301 Select this for an IDT Winchip C6 chip. Linux and GCC
302 treat this chip as a 586TSC with some extended instructions
303 and alignment requirements.
304
305config MWINCHIP2
306 bool "Winchip-2"
307 help
308 Select this for an IDT Winchip-2. Linux and GCC
309 treat this chip as a 586TSC with some extended instructions
310 and alignment requirements.
311
312config MWINCHIP3D
313 bool "Winchip-2A/Winchip-3"
314 help
315 Select this for an IDT Winchip-2A or 3. Linux and GCC
316 treat this chip as a 586TSC with some extended instructions
317 and alignment reqirements. Also enable out of order memory
318 stores for this CPU, which can increase performance of some
319 operations.
320
321config MGEODEGX1
322 bool "GeodeGX1"
323 help
324 Select this for a Geode GX1 (Cyrix MediaGX) chip.
325
326config MCYRIXIII
327 bool "CyrixIII/VIA-C3"
328 help
329 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
330 treat this chip as a generic 586. Whilst the CPU is 686 class,
331 it lacks the cmov extension which gcc assumes is present when
332 generating 686 code.
333 Note that Nehemiah (Model 9) and above will not boot with this
334 kernel due to them lacking the 3DNow! instructions used in earlier
335 incarnations of the CPU.
336
337config MVIAC3_2
338 bool "VIA C3-2 (Nehemiah)"
339 help
340 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
341 of SSE and tells gcc to treat the CPU as a 686.
342 Note, this kernel will not boot on older (pre model 9) C3s.
343
344endchoice
345
346config X86_GENERIC
347 bool "Generic x86 support"
348 help
349 Instead of just including optimizations for the selected
350 x86 variant (e.g. PII, Crusoe or Athlon), include some more
351 generic optimizations as well. This will make the kernel
352 perform better on x86 CPUs other than that selected.
353
354 This is really intended for distributors who need more
355 generic optimizations.
356
357endif
358
359#
360# Define implied options from the CPU selection here
361#
362config X86_CMPXCHG
363 bool
364 depends on !M386
365 default y
366
367config X86_XADD
368 bool
369 depends on !M386
370 default y
371
372config X86_L1_CACHE_SHIFT
373 int
374 default "7" if MPENTIUM4 || X86_GENERIC
375 default "4" if X86_ELAN || M486 || M386
376 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
377 default "6" if MK7 || MK8 || MPENTIUMM
378
379config RWSEM_GENERIC_SPINLOCK
380 bool
381 depends on M386
382 default y
383
384config RWSEM_XCHGADD_ALGORITHM
385 bool
386 depends on !M386
387 default y
388
389config GENERIC_CALIBRATE_DELAY
390 bool
391 default y
392
393config X86_PPRO_FENCE
394 bool
395 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
396 default y
397
398config X86_F00F_BUG
399 bool
400 depends on M586MMX || M586TSC || M586 || M486 || M386
401 default y
402
403config X86_WP_WORKS_OK
404 bool
405 depends on !M386
406 default y
407
408config X86_INVLPG
409 bool
410 depends on !M386
411 default y
412
413config X86_BSWAP
414 bool
415 depends on !M386
416 default y
417
418config X86_POPAD_OK
419 bool
420 depends on !M386
421 default y
422
423config X86_ALIGNMENT_16
424 bool
425 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
426 default y
427
428config X86_GOOD_APIC
429 bool
430 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
431 default y
432
433config X86_INTEL_USERCOPY
434 bool
435 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
436 default y
437
438config X86_USE_PPRO_CHECKSUM
439 bool
440 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
441 default y
442
443config X86_USE_3DNOW
444 bool
445 depends on MCYRIXIII || MK7
446 default y
447
448config X86_OOSTORE
449 bool
450 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
451 default y
452 159
453config HPET_TIMER 160config HPET_TIMER
454 bool "HPET Timer Support" 161 bool "HPET Timer Support"
@@ -561,11 +268,6 @@ config X86_VISWS_APIC
561 depends on X86_VISWS 268 depends on X86_VISWS
562 default y 269 default y
563 270
564config X86_TSC
565 bool
566 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
567 default y
568
569config X86_MCE 271config X86_MCE
570 bool "Machine Check Exception" 272 bool "Machine Check Exception"
571 depends on !X86_VOYAGER 273 depends on !X86_VOYAGER
diff --git a/arch/i386/Kconfig.cpu b/arch/i386/Kconfig.cpu
new file mode 100644
index 000000000000..53bbb3c008ee
--- /dev/null
+++ b/arch/i386/Kconfig.cpu
@@ -0,0 +1,309 @@
1# Put here option for CPU selection and depending optimization
2if !X86_ELAN
3
4choice
5 prompt "Processor family"
6 default M686
7
8config M386
9 bool "386"
10 ---help---
11 This is the processor type of your CPU. This information is used for
12 optimizing purposes. In order to compile a kernel that can run on
13 all x86 CPU types (albeit not optimally fast), you can specify
14 "386" here.
15
16 The kernel will not necessarily run on earlier architectures than
17 the one you have chosen, e.g. a Pentium optimized kernel will run on
18 a PPro, but not necessarily on a i486.
19
20 Here are the settings recommended for greatest speed:
21 - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
22 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
23 will run on a 386 class machine.
24 - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
25 SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
26 - "586" for generic Pentium CPUs lacking the TSC
27 (time stamp counter) register.
28 - "Pentium-Classic" for the Intel Pentium.
29 - "Pentium-MMX" for the Intel Pentium MMX.
30 - "Pentium-Pro" for the Intel Pentium Pro.
31 - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
32 - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
33 - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
34 - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
35 - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
36 - "Crusoe" for the Transmeta Crusoe series.
37 - "Efficeon" for the Transmeta Efficeon series.
38 - "Winchip-C6" for original IDT Winchip.
39 - "Winchip-2" for IDT Winchip 2.
40 - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
41 - "GeodeGX1" for Geode GX1 (Cyrix MediaGX).
42 - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
43 - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
44
45 If you don't know what to do, choose "386".
46
47config M486
48 bool "486"
49 help
50 Select this for a 486 series processor, either Intel or one of the
51 compatible processors from AMD, Cyrix, IBM, or Intel. Includes DX,
52 DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
53 U5S.
54
55config M586
56 bool "586/K5/5x86/6x86/6x86MX"
57 help
58 Select this for an 586 or 686 series processor such as the AMD K5,
59 the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
60 assume the RDTSC (Read Time Stamp Counter) instruction.
61
62config M586TSC
63 bool "Pentium-Classic"
64 help
65 Select this for a Pentium Classic processor with the RDTSC (Read
66 Time Stamp Counter) instruction for benchmarking.
67
68config M586MMX
69 bool "Pentium-MMX"
70 help
71 Select this for a Pentium with the MMX graphics/multimedia
72 extended instructions.
73
74config M686
75 bool "Pentium-Pro"
76 help
77 Select this for Intel Pentium Pro chips. This enables the use of
78 Pentium Pro extended instructions, and disables the init-time guard
79 against the f00f bug found in earlier Pentiums.
80
81config MPENTIUMII
82 bool "Pentium-II/Celeron(pre-Coppermine)"
83 help
84 Select this for Intel chips based on the Pentium-II and
85 pre-Coppermine Celeron core. This option enables an unaligned
86 copy optimization, compiles the kernel with optimization flags
87 tailored for the chip, and applies any applicable Pentium Pro
88 optimizations.
89
90config MPENTIUMIII
91 bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
92 help
93 Select this for Intel chips based on the Pentium-III and
94 Celeron-Coppermine core. This option enables use of some
95 extended prefetch instructions in addition to the Pentium II
96 extensions.
97
98config MPENTIUMM
99 bool "Pentium M"
100 help
101 Select this for Intel Pentium M (not Pentium-4 M)
102 notebook chips.
103
104config MPENTIUM4
105 bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
106 help
107 Select this for Intel Pentium 4 chips. This includes the
108 Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
109 (not Pentium M) chips. This option enables compile flags
110 optimized for the chip, uses the correct cache shift, and
111 applies any applicable Pentium III optimizations.
112
113config MK6
114 bool "K6/K6-II/K6-III"
115 help
116 Select this for an AMD K6-family processor. Enables use of
117 some extended instructions, and passes appropriate optimization
118 flags to GCC.
119
120config MK7
121 bool "Athlon/Duron/K7"
122 help
123 Select this for an AMD Athlon K7-family processor. Enables use of
124 some extended instructions, and passes appropriate optimization
125 flags to GCC.
126
127config MK8
128 bool "Opteron/Athlon64/Hammer/K8"
129 help
130 Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
131 use of some extended instructions, and passes appropriate optimization
132 flags to GCC.
133
134config MCRUSOE
135 bool "Crusoe"
136 help
137 Select this for a Transmeta Crusoe processor. Treats the processor
138 like a 586 with TSC, and sets some GCC optimization flags (like a
139 Pentium Pro with no alignment requirements).
140
141config MEFFICEON
142 bool "Efficeon"
143 help
144 Select this for a Transmeta Efficeon processor.
145
146config MWINCHIPC6
147 bool "Winchip-C6"
148 help
149 Select this for an IDT Winchip C6 chip. Linux and GCC
150 treat this chip as a 586TSC with some extended instructions
151 and alignment requirements.
152
153config MWINCHIP2
154 bool "Winchip-2"
155 help
156 Select this for an IDT Winchip-2. Linux and GCC
157 treat this chip as a 586TSC with some extended instructions
158 and alignment requirements.
159
160config MWINCHIP3D
161 bool "Winchip-2A/Winchip-3"
162 help
163 Select this for an IDT Winchip-2A or 3. Linux and GCC
164 treat this chip as a 586TSC with some extended instructions
165 and alignment reqirements. Also enable out of order memory
166 stores for this CPU, which can increase performance of some
167 operations.
168
169config MGEODEGX1
170 bool "GeodeGX1"
171 help
172 Select this for a Geode GX1 (Cyrix MediaGX) chip.
173
174config MCYRIXIII
175 bool "CyrixIII/VIA-C3"
176 help
177 Select this for a Cyrix III or C3 chip. Presently Linux and GCC
178 treat this chip as a generic 586. Whilst the CPU is 686 class,
179 it lacks the cmov extension which gcc assumes is present when
180 generating 686 code.
181 Note that Nehemiah (Model 9) and above will not boot with this
182 kernel due to them lacking the 3DNow! instructions used in earlier
183 incarnations of the CPU.
184
185config MVIAC3_2
186 bool "VIA C3-2 (Nehemiah)"
187 help
188 Select this for a VIA C3 "Nehemiah". Selecting this enables usage
189 of SSE and tells gcc to treat the CPU as a 686.
190 Note, this kernel will not boot on older (pre model 9) C3s.
191
192endchoice
193
194config X86_GENERIC
195 bool "Generic x86 support"
196 help
197 Instead of just including optimizations for the selected
198 x86 variant (e.g. PII, Crusoe or Athlon), include some more
199 generic optimizations as well. This will make the kernel
200 perform better on x86 CPUs other than that selected.
201
202 This is really intended for distributors who need more
203 generic optimizations.
204
205endif
206
207#
208# Define implied options from the CPU selection here
209#
210config X86_CMPXCHG
211 bool
212 depends on !M386
213 default y
214
215config X86_XADD
216 bool
217 depends on !M386
218 default y
219
220config X86_L1_CACHE_SHIFT
221 int
222 default "7" if MPENTIUM4 || X86_GENERIC
223 default "4" if X86_ELAN || M486 || M386
224 default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODEGX1
225 default "6" if MK7 || MK8 || MPENTIUMM
226
227config RWSEM_GENERIC_SPINLOCK
228 bool
229 depends on M386
230 default y
231
232config RWSEM_XCHGADD_ALGORITHM
233 bool
234 depends on !M386
235 default y
236
237config GENERIC_CALIBRATE_DELAY
238 bool
239 default y
240
241config X86_PPRO_FENCE
242 bool
243 depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
244 default y
245
246config X86_F00F_BUG
247 bool
248 depends on M586MMX || M586TSC || M586 || M486 || M386
249 default y
250
251config X86_WP_WORKS_OK
252 bool
253 depends on !M386
254 default y
255
256config X86_INVLPG
257 bool
258 depends on !M386
259 default y
260
261config X86_BSWAP
262 bool
263 depends on !M386
264 default y
265
266config X86_POPAD_OK
267 bool
268 depends on !M386
269 default y
270
271config X86_CMPXCHG64
272 bool
273 depends on !M386 && !M486
274 default y
275
276config X86_ALIGNMENT_16
277 bool
278 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
279 default y
280
281config X86_GOOD_APIC
282 bool
283 depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
284 default y
285
286config X86_INTEL_USERCOPY
287 bool
288 depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
289 default y
290
291config X86_USE_PPRO_CHECKSUM
292 bool
293 depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
294 default y
295
296config X86_USE_3DNOW
297 bool
298 depends on MCYRIXIII || MK7
299 default y
300
301config X86_OOSTORE
302 bool
303 depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
304 default y
305
306config X86_TSC
307 bool
308 depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1) && !X86_NUMAQ
309 default y
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 09951990a622..d121ea18460f 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -34,35 +34,8 @@ CFLAGS += -pipe -msoft-float
34# prevent gcc from keeping the stack 16 byte aligned 34# prevent gcc from keeping the stack 16 byte aligned
35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) 35CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
36 36
37align := $(cc-option-align) 37# CPU-specific tuning. Anything which can be shared with UML should go here.
38cflags-$(CONFIG_M386) += -march=i386 38include $(srctree)/arch/i386/Makefile.cpu
39cflags-$(CONFIG_M486) += -march=i486
40cflags-$(CONFIG_M586) += -march=i586
41cflags-$(CONFIG_M586TSC) += -march=i586
42cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
43cflags-$(CONFIG_M686) += -march=i686
44cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call cc-option,-mtune=pentium2)
45cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call cc-option,-mtune=pentium3)
46cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call cc-option,-mtune=pentium3)
47cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call cc-option,-mtune=pentium4)
48cflags-$(CONFIG_MK6) += -march=k6
49# Please note, that patches that add -march=athlon-xp and friends are pointless.
50# They make zero difference whatsosever to performance at this time.
51cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
52cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
53cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
54cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call cc-option,-mtune=pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
55cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
56cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
57cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
58cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
59cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
60
61# AMD Elan support
62cflags-$(CONFIG_X86_ELAN) += -march=i486
63
64# Geode GX1 support
65cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
66 39
67# -mregparm=3 works ok on gcc-3.0 and later 40# -mregparm=3 works ok on gcc-3.0 and later
68# 41#
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
new file mode 100644
index 000000000000..8e51456df23d
--- /dev/null
+++ b/arch/i386/Makefile.cpu
@@ -0,0 +1,41 @@
1# CPU tuning section - shared with UML.
2# Must change only cflags-y (or [yn]), not CFLAGS! That makes a difference for UML.
3
4#-mtune exists since gcc 3.4, and some -mcpu flavors didn't exist in gcc 2.95.
5HAS_MTUNE := $(call cc-option-yn, -mtune=i386)
6ifeq ($(HAS_MTUNE),y)
7tune = $(call cc-option,-mtune=$(1),)
8else
9tune = $(call cc-option,-mcpu=$(1),)
10endif
11
12align := $(cc-option-align)
13cflags-$(CONFIG_M386) += -march=i386
14cflags-$(CONFIG_M486) += -march=i486
15cflags-$(CONFIG_M586) += -march=i586
16cflags-$(CONFIG_M586TSC) += -march=i586
17cflags-$(CONFIG_M586MMX) += $(call cc-option,-march=pentium-mmx,-march=i586)
18cflags-$(CONFIG_M686) += -march=i686
19cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
20cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
21cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3)
22cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
23cflags-$(CONFIG_MK6) += -march=k6
24# Please note, that patches that add -march=athlon-xp and friends are pointless.
25# They make zero difference whatsosever to performance at this time.
26cflags-$(CONFIG_MK7) += $(call cc-option,-march=athlon,-march=i686 $(align)-functions=4)
27cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,$(call cc-option,-march=athlon,-march=i686 $(align)-functions=4))
28cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
29cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
30cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
31cflags-$(CONFIG_MWINCHIP2) += $(call cc-option,-march=winchip2,-march=i586)
32cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
33cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
34cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
35
36# AMD Elan support
37cflags-$(CONFIG_X86_ELAN) += -march=i486
38
39# Geode GX1 support
40cflags-$(CONFIG_MGEODEGX1) += $(call cc-option,-march=pentium-mmx,-march=i486)
41
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 5546ddebec33..9204be6eedb3 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -803,6 +803,7 @@ no_apic:
803 803
804void __init init_apic_mappings(void) 804void __init init_apic_mappings(void)
805{ 805{
806 unsigned int orig_apicid;
806 unsigned long apic_phys; 807 unsigned long apic_phys;
807 808
808 /* 809 /*
@@ -824,8 +825,11 @@ void __init init_apic_mappings(void)
824 * Fetch the APIC ID of the BSP in case we have a 825 * Fetch the APIC ID of the BSP in case we have a
825 * default configuration (or the MP table is broken). 826 * default configuration (or the MP table is broken).
826 */ 827 */
827 if (boot_cpu_physical_apicid == -1U) 828 orig_apicid = boot_cpu_physical_apicid;
828 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); 829 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
830 if ((orig_apicid != -1U) && (orig_apicid != boot_cpu_physical_apicid))
831 printk(KERN_WARNING "Boot APIC ID in local APIC unexpected (%d vs %d)",
832 orig_apicid, boot_cpu_physical_apicid);
829 833
830#ifdef CONFIG_X86_IO_APIC 834#ifdef CONFIG_X86_IO_APIC
831 { 835 {
@@ -1046,10 +1050,11 @@ static unsigned int calibration_result;
1046 1050
1047void __init setup_boot_APIC_clock(void) 1051void __init setup_boot_APIC_clock(void)
1048{ 1052{
1053 unsigned long flags;
1049 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"); 1054 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n");
1050 using_apic_timer = 1; 1055 using_apic_timer = 1;
1051 1056
1052 local_irq_disable(); 1057 local_irq_save(flags);
1053 1058
1054 calibration_result = calibrate_APIC_clock(); 1059 calibration_result = calibrate_APIC_clock();
1055 /* 1060 /*
@@ -1057,7 +1062,7 @@ void __init setup_boot_APIC_clock(void)
1057 */ 1062 */
1058 setup_APIC_timer(calibration_result); 1063 setup_APIC_timer(calibration_result);
1059 1064
1060 local_irq_enable(); 1065 local_irq_restore(flags);
1061} 1066}
1062 1067
1063void __devinit setup_secondary_APIC_clock(void) 1068void __devinit setup_secondary_APIC_clock(void)
@@ -1254,40 +1259,81 @@ fastcall void smp_error_interrupt(struct pt_regs *regs)
1254} 1259}
1255 1260
1256/* 1261/*
1257 * This initializes the IO-APIC and APIC hardware if this is 1262 * This initializes the IO-APIC and APIC hardware.
1258 * a UP kernel.
1259 */ 1263 */
1260int __init APIC_init_uniprocessor (void) 1264int __init APIC_init(void)
1261{ 1265{
1262 if (enable_local_apic < 0) 1266 if (enable_local_apic < 0) {
1263 clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); 1267 printk(KERN_INFO "APIC disabled\n");
1268 return -1;
1269 }
1264 1270
1265 if (!smp_found_config && !cpu_has_apic) 1271 /* See if we have a SMP configuration or have forced enabled
1272 * the local apic.
1273 */
1274 if (!smp_found_config && !acpi_lapic && !cpu_has_apic) {
1275 enable_local_apic = -1;
1266 return -1; 1276 return -1;
1277 }
1267 1278
1268 /* 1279 /*
1269 * Complain if the BIOS pretends there is one. 1280 * Complain if the BIOS pretends there is an apic.
1281 * Then get out because we don't have an a local apic.
1270 */ 1282 */
1271 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1283 if (!cpu_has_apic && APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1272 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", 1284 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1273 boot_cpu_physical_apicid); 1285 boot_cpu_physical_apicid);
1286 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1287 enable_local_apic = -1;
1274 return -1; 1288 return -1;
1275 } 1289 }
1276 1290
1277 verify_local_APIC(); 1291 verify_local_APIC();
1278 1292
1293 /*
1294 * Should not be necessary because the MP table should list the boot
1295 * CPU too, but we do it for the sake of robustness anyway.
1296 * Makes no sense to do this check in clustered apic mode, so skip it
1297 */
1298 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1299 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1300 boot_cpu_physical_apicid);
1301 physid_set(boot_cpu_physical_apicid, phys_cpu_present_map);
1302 }
1303
1304 /*
1305 * Switch from PIC to APIC mode.
1306 */
1279 connect_bsp_APIC(); 1307 connect_bsp_APIC();
1308 setup_local_APIC();
1280 1309
1281 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 1310#ifdef CONFIG_X86_IO_APIC
1311 /*
1312 * Now start the IO-APICs
1313 */
1314 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1315 setup_IO_APIC();
1316#endif
1317 return 0;
1318}
1282 1319
1283 setup_local_APIC(); 1320void __init APIC_late_time_init(void)
1321{
1322 /* Improve our loops per jiffy estimate */
1323 loops_per_jiffy = ((1000 + HZ - 1)/HZ)*cpu_khz;
1324 boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
1325 cpu_data[0].loops_per_jiffy = loops_per_jiffy;
1326
1327 /* setup_apic_nmi_watchdog doesn't work properly before cpu_khz is
1328 * initialized. So redo it here to ensure the boot cpu is setup
1329 * properly.
1330 */
1331 if (nmi_watchdog == NMI_LOCAL_APIC)
1332 setup_apic_nmi_watchdog();
1284 1333
1285#ifdef CONFIG_X86_IO_APIC 1334#ifdef CONFIG_X86_IO_APIC
1286 if (smp_found_config) 1335 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
1287 if (!skip_ioapic_setup && nr_ioapics) 1336 IO_APIC_late_time_init();
1288 setup_IO_APIC();
1289#endif 1337#endif
1290 setup_boot_APIC_clock(); 1338 setup_boot_APIC_clock();
1291
1292 return 0;
1293} 1339}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index d7811c4e8b50..d2ef0c2aa93e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -597,12 +597,14 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
597 cpumask_t cpus; 597 cpumask_t cpus;
598 int cpu; 598 int cpu;
599 struct desc_struct save_desc_40; 599 struct desc_struct save_desc_40;
600 struct desc_struct *gdt;
600 601
601 cpus = apm_save_cpus(); 602 cpus = apm_save_cpus();
602 603
603 cpu = get_cpu(); 604 cpu = get_cpu();
604 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 605 gdt = get_cpu_gdt_table(cpu);
605 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 606 save_desc_40 = gdt[0x40 / 8];
607 gdt[0x40 / 8] = bad_bios_desc;
606 608
607 local_save_flags(flags); 609 local_save_flags(flags);
608 APM_DO_CLI; 610 APM_DO_CLI;
@@ -610,7 +612,7 @@ static u8 apm_bios_call(u32 func, u32 ebx_in, u32 ecx_in,
610 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi); 612 apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
611 APM_DO_RESTORE_SEGS; 613 APM_DO_RESTORE_SEGS;
612 local_irq_restore(flags); 614 local_irq_restore(flags);
613 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = save_desc_40; 615 gdt[0x40 / 8] = save_desc_40;
614 put_cpu(); 616 put_cpu();
615 apm_restore_cpus(cpus); 617 apm_restore_cpus(cpus);
616 618
@@ -639,13 +641,14 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
639 cpumask_t cpus; 641 cpumask_t cpus;
640 int cpu; 642 int cpu;
641 struct desc_struct save_desc_40; 643 struct desc_struct save_desc_40;
642 644 struct desc_struct *gdt;
643 645
644 cpus = apm_save_cpus(); 646 cpus = apm_save_cpus();
645 647
646 cpu = get_cpu(); 648 cpu = get_cpu();
647 save_desc_40 = per_cpu(cpu_gdt_table, cpu)[0x40 / 8]; 649 gdt = get_cpu_gdt_table(cpu);
648 per_cpu(cpu_gdt_table, cpu)[0x40 / 8] = bad_bios_desc; 650 save_desc_40 = gdt[0x40 / 8];
651 gdt[0x40 / 8] = bad_bios_desc;
649 652
650 local_save_flags(flags); 653 local_save_flags(flags);
651 APM_DO_CLI; 654 APM_DO_CLI;
@@ -653,7 +656,7 @@ static u8 apm_bios_call_simple(u32 func, u32 ebx_in, u32 ecx_in, u32 *eax)
653 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax); 656 error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
654 APM_DO_RESTORE_SEGS; 657 APM_DO_RESTORE_SEGS;
655 local_irq_restore(flags); 658 local_irq_restore(flags);
656 __get_cpu_var(cpu_gdt_table)[0x40 / 8] = save_desc_40; 659 gdt[0x40 / 8] = save_desc_40;
657 put_cpu(); 660 put_cpu();
658 apm_restore_cpus(cpus); 661 apm_restore_cpus(cpus);
659 return error; 662 return error;
@@ -2295,35 +2298,36 @@ static int __init apm_init(void)
2295 apm_bios_entry.segment = APM_CS; 2298 apm_bios_entry.segment = APM_CS;
2296 2299
2297 for (i = 0; i < NR_CPUS; i++) { 2300 for (i = 0; i < NR_CPUS; i++) {
2298 set_base(per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2301 struct desc_struct *gdt = get_cpu_gdt_table(i);
2302 set_base(gdt[APM_CS >> 3],
2299 __va((unsigned long)apm_info.bios.cseg << 4)); 2303 __va((unsigned long)apm_info.bios.cseg << 4));
2300 set_base(per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2304 set_base(gdt[APM_CS_16 >> 3],
2301 __va((unsigned long)apm_info.bios.cseg_16 << 4)); 2305 __va((unsigned long)apm_info.bios.cseg_16 << 4));
2302 set_base(per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2306 set_base(gdt[APM_DS >> 3],
2303 __va((unsigned long)apm_info.bios.dseg << 4)); 2307 __va((unsigned long)apm_info.bios.dseg << 4));
2304#ifndef APM_RELAX_SEGMENTS 2308#ifndef APM_RELAX_SEGMENTS
2305 if (apm_info.bios.version == 0x100) { 2309 if (apm_info.bios.version == 0x100) {
2306#endif 2310#endif
2307 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */ 2311 /* For ASUS motherboard, Award BIOS rev 110 (and others?) */
2308 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 - 1); 2312 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 - 1);
2309 /* For some unknown machine. */ 2313 /* For some unknown machine. */
2310 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 64 * 1024 - 1); 2314 _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
2311 /* For the DEC Hinote Ultra CT475 (and others?) */ 2315 /* For the DEC Hinote Ultra CT475 (and others?) */
2312 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 64 * 1024 - 1); 2316 _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
2313#ifndef APM_RELAX_SEGMENTS 2317#ifndef APM_RELAX_SEGMENTS
2314 } else { 2318 } else {
2315 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 2319 _set_limit((char *)&gdt[APM_CS >> 3],
2316 (apm_info.bios.cseg_len - 1) & 0xffff); 2320 (apm_info.bios.cseg_len - 1) & 0xffff);
2317 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS_16 >> 3], 2321 _set_limit((char *)&gdt[APM_CS_16 >> 3],
2318 (apm_info.bios.cseg_16_len - 1) & 0xffff); 2322 (apm_info.bios.cseg_16_len - 1) & 0xffff);
2319 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_DS >> 3], 2323 _set_limit((char *)&gdt[APM_DS >> 3],
2320 (apm_info.bios.dseg_len - 1) & 0xffff); 2324 (apm_info.bios.dseg_len - 1) & 0xffff);
2321 /* workaround for broken BIOSes */ 2325 /* workaround for broken BIOSes */
2322 if (apm_info.bios.cseg_len <= apm_info.bios.offset) 2326 if (apm_info.bios.cseg_len <= apm_info.bios.offset)
2323 _set_limit((char *)&per_cpu(cpu_gdt_table, i)[APM_CS >> 3], 64 * 1024 -1); 2327 _set_limit((char *)&gdt[APM_CS >> 3], 64 * 1024 -1);
2324 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */ 2328 if (apm_info.bios.dseg_len <= 0x40) { /* 0x40 * 4kB == 64kB */
2325 /* for the BIOS that assumes granularity = 1 */ 2329 /* for the BIOS that assumes granularity = 1 */
2326 per_cpu(cpu_gdt_table, i)[APM_DS >> 3].b |= 0x800000; 2330 gdt[APM_DS >> 3].b |= 0x800000;
2327 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n"); 2331 printk(KERN_NOTICE "apm: we set the granularity of dseg.\n");
2328 } 2332 }
2329 } 2333 }
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 9ad43be9a01f..74145a33cb0f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
573 int cpu = smp_processor_id(); 573 int cpu = smp_processor_id();
574 struct tss_struct * t = &per_cpu(init_tss, cpu); 574 struct tss_struct * t = &per_cpu(init_tss, cpu);
575 struct thread_struct *thread = &current->thread; 575 struct thread_struct *thread = &current->thread;
576 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
576 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); 577 __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
577 578
578 if (cpu_test_and_set(cpu, cpu_initialized)) { 579 if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,24 +595,16 @@ void __devinit cpu_init(void)
594 * Initialize the per-CPU GDT with the boot GDT, 595 * Initialize the per-CPU GDT with the boot GDT,
595 * and set up the GDT descriptor: 596 * and set up the GDT descriptor:
596 */ 597 */
597 memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table, 598 memcpy(gdt, cpu_gdt_table, GDT_SIZE);
598 GDT_SIZE);
599 599
600 /* Set up GDT entry for 16bit stack */ 600 /* Set up GDT entry for 16bit stack */
601 *(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |= 601 *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) | 602 ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | 603 ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
604 (CPU_16BIT_STACK_SIZE - 1); 604 (CPU_16BIT_STACK_SIZE - 1);
605 605
606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1; 606 cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
607 cpu_gdt_descr[cpu].address = 607 cpu_gdt_descr[cpu].address = (unsigned long)gdt;
608 (unsigned long)&per_cpu(cpu_gdt_table, cpu);
609
610 /*
611 * Set up the per-thread TLS descriptor cache:
612 */
613 memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
614 GDT_ENTRY_TLS_ENTRIES * 8);
615 608
616 load_gdt(&cpu_gdt_descr[cpu]); 609 load_gdt(&cpu_gdt_descr[cpu]);
617 load_idt(&idt_descr); 610 load_idt(&idt_descr);
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 822c8ce9d1f1..caa9f7711343 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -32,6 +32,7 @@
32#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/compiler.h> 34#include <linux/compiler.h>
35#include <linux/sched.h> /* current */
35#include <asm/io.h> 36#include <asm/io.h>
36#include <asm/delay.h> 37#include <asm/delay.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index aa622d52c6e5..270f2188d68b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -28,6 +28,7 @@
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <linux/sched.h> /* current / set_cpus_allowed() */
31 32
32#include <asm/processor.h> 33#include <asm/processor.h>
33#include <asm/msr.h> 34#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 58ca98fdc2ca..2d5c9adba0cd 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -32,6 +32,7 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/string.h> 33#include <linux/string.h>
34#include <linux/cpumask.h> 34#include <linux/cpumask.h>
35#include <linux/sched.h> /* for current / set_cpus_allowed() */
35 36
36#include <asm/msr.h> 37#include <asm/msr.h>
37#include <asm/io.h> 38#include <asm/io.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c397b6220430..1465974256c9 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -22,6 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/config.h> 24#include <linux/config.h>
25#include <linux/sched.h> /* current */
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/compiler.h> 27#include <linux/compiler.h>
27 28
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9e0d5f83cb9f..4dc42a189ae5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
6 */ 7 */
7 8
8#include <linux/init.h> 9#include <linux/init.h>
@@ -10,6 +11,7 @@
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/compiler.h> 12#include <linux/compiler.h>
12#include <linux/cpu.h> 13#include <linux/cpu.h>
14#include <linux/sched.h>
13 15
14#include <asm/processor.h> 16#include <asm/processor.h>
15#include <asm/smp.h> 17#include <asm/smp.h>
@@ -28,7 +30,7 @@ struct _cache_table
28}; 30};
29 31
30/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 32/* all the cache descriptor types we care about (no TLB or trace cache entries) */
31static struct _cache_table cache_table[] __devinitdata = 33static struct _cache_table cache_table[] __cpuinitdata =
32{ 34{
33 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 35 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
34 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 36 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -117,10 +119,9 @@ struct _cpuid4_info {
117 cpumask_t shared_cpu_map; 119 cpumask_t shared_cpu_map;
118}; 120};
119 121
120#define MAX_CACHE_LEAVES 4
121static unsigned short num_cache_leaves; 122static unsigned short num_cache_leaves;
122 123
123static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf) 124static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
124{ 125{
125 unsigned int eax, ebx, ecx, edx; 126 unsigned int eax, ebx, ecx, edx;
126 union _cpuid4_leaf_eax cache_eax; 127 union _cpuid4_leaf_eax cache_eax;
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void)
144{ 145{
145 unsigned int eax, ebx, ecx, edx; 146 unsigned int eax, ebx, ecx, edx;
146 union _cpuid4_leaf_eax cache_eax; 147 union _cpuid4_leaf_eax cache_eax;
147 int i; 148 int i = -1;
148 int retval;
149 149
150 retval = MAX_CACHE_LEAVES; 150 do {
151 /* Do cpuid(4) loop to find out num_cache_leaves */ 151 ++i;
152 for (i = 0; i < MAX_CACHE_LEAVES; i++) { 152 /* Do cpuid(4) loop to find out num_cache_leaves */
153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx); 153 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
154 cache_eax.full = eax; 154 cache_eax.full = eax;
155 if (cache_eax.split.type == CACHE_TYPE_NULL) { 155 } while (cache_eax.split.type != CACHE_TYPE_NULL);
156 retval = i; 156 return i;
157 break;
158 }
159 }
160 return retval;
161} 157}
162 158
163unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 159unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
164{ 160{
165 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 161 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
166 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 162 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
284 if ( l3 ) 280 if ( l3 )
285 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3); 281 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
286 282
287 /* 283 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
288 * This assumes the L3 cache is shared; it typically lives in
289 * the northbridge. The L1 caches are included by the L2
290 * cache, and so should not be included for the purpose of
291 * SMP switching weights.
292 */
293 c->x86_cache_size = l2 ? l2 : (l1i+l1d);
294 } 284 }
295 285
296 return l2; 286 return l2;
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
301#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y])) 291#define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y]))
302 292
303#ifdef CONFIG_SMP 293#ifdef CONFIG_SMP
304static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 294static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
305{ 295{
306 struct _cpuid4_info *this_leaf; 296 struct _cpuid4_info *this_leaf;
307 unsigned long num_threads_sharing; 297 unsigned long num_threads_sharing;
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu)
334 cpuid4_info[cpu] = NULL; 324 cpuid4_info[cpu] = NULL;
335} 325}
336 326
337static int __devinit detect_cache_attributes(unsigned int cpu) 327static int __cpuinit detect_cache_attributes(unsigned int cpu)
338{ 328{
339 struct _cpuid4_info *this_leaf; 329 struct _cpuid4_info *this_leaf;
340 unsigned long j; 330 unsigned long j;
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
511 free_cache_attributes(cpu); 501 free_cache_attributes(cpu);
512} 502}
513 503
514static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu) 504static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
515{ 505{
516 506
517 if (num_cache_leaves == 0) 507 if (num_cache_leaves == 0)
@@ -542,7 +532,7 @@ err_out:
542} 532}
543 533
544/* Add/Remove cache interface for CPU device */ 534/* Add/Remove cache interface for CPU device */
545static int __devinit cache_add_dev(struct sys_device * sys_dev) 535static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
546{ 536{
547 unsigned int cpu = sys_dev->id; 537 unsigned int cpu = sys_dev->id;
548 unsigned long i, j; 538 unsigned long i, j;
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
579 return retval; 569 return retval;
580} 570}
581 571
582static int __devexit cache_remove_dev(struct sys_device * sys_dev) 572static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
583{ 573{
584 unsigned int cpu = sys_dev->id; 574 unsigned int cpu = sys_dev->id;
585 unsigned long i; 575 unsigned long i;
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
588 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 578 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
589 kobject_unregister(cache_kobject[cpu]); 579 kobject_unregister(cache_kobject[cpu]);
590 cpuid4_cache_sysfs_exit(cpu); 580 cpuid4_cache_sysfs_exit(cpu);
591 return 0; 581 return;
582}
583
584static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
585 unsigned long action, void *hcpu)
586{
587 unsigned int cpu = (unsigned long)hcpu;
588 struct sys_device *sys_dev;
589
590 sys_dev = get_cpu_sysdev(cpu);
591 switch (action) {
592 case CPU_ONLINE:
593 cache_add_dev(sys_dev);
594 break;
595 case CPU_DEAD:
596 cache_remove_dev(sys_dev);
597 break;
598 }
599 return NOTIFY_OK;
592} 600}
593 601
594static struct sysdev_driver cache_sysdev_driver = { 602static struct notifier_block cacheinfo_cpu_notifier =
595 .add = cache_add_dev, 603{
596 .remove = __devexit_p(cache_remove_dev), 604 .notifier_call = cacheinfo_cpu_callback,
597}; 605};
598 606
599/* Register/Unregister the cpu_cache driver */ 607static int __cpuinit cache_sysfs_init(void)
600static int __devinit cache_register_driver(void)
601{ 608{
609 int i;
610
602 if (num_cache_leaves == 0) 611 if (num_cache_leaves == 0)
603 return 0; 612 return 0;
604 613
605 return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver); 614 register_cpu_notifier(&cacheinfo_cpu_notifier);
615
616 for_each_online_cpu(i) {
617 cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
618 (void *)(long)i);
619 }
620
621 return 0;
606} 622}
607 623
608device_initcall(cache_register_driver); 624device_initcall(cache_sysfs_init);
609 625
610#endif 626#endif
611
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 3c035b8fa3d9..979b18bc95c1 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -102,11 +102,16 @@ void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 102 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
103 nr_mce_banks = l & 0xff; 103 nr_mce_banks = l & 0xff;
104 104
105 /* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */ 105 /*
106 for (i=1; i<nr_mce_banks; i++) { 106 * Following the example in IA-32 SDM Vol 3:
107 * - MC0_CTL should not be written
108 * - Status registers on all banks should be cleared on reset
109 */
110 for (i=1; i<nr_mce_banks; i++)
107 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff); 111 wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
112
113 for (i=0; i<nr_mce_banks; i++)
108 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0); 114 wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
109 }
110 115
111 set_in_cr4 (X86_CR4_MCE); 116 set_in_cr4 (X86_CR4_MCE);
112 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n", 117 printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 1923e0aed26a..cf39e205d33c 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -149,60 +149,89 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
149 return -EINVAL; 149 return -EINVAL;
150} 150}
151 151
152static int 152static long
153mtrr_ioctl(struct inode *inode, struct file *file, 153mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
154 unsigned int cmd, unsigned long __arg)
155{ 154{
156 int err; 155 int err = 0;
157 mtrr_type type; 156 mtrr_type type;
158 struct mtrr_sentry sentry; 157 struct mtrr_sentry sentry;
159 struct mtrr_gentry gentry; 158 struct mtrr_gentry gentry;
160 void __user *arg = (void __user *) __arg; 159 void __user *arg = (void __user *) __arg;
161 160
162 switch (cmd) { 161 switch (cmd) {
162 case MTRRIOC_ADD_ENTRY:
163 case MTRRIOC_SET_ENTRY:
164 case MTRRIOC_DEL_ENTRY:
165 case MTRRIOC_KILL_ENTRY:
166 case MTRRIOC_ADD_PAGE_ENTRY:
167 case MTRRIOC_SET_PAGE_ENTRY:
168 case MTRRIOC_DEL_PAGE_ENTRY:
169 case MTRRIOC_KILL_PAGE_ENTRY:
170 if (copy_from_user(&sentry, arg, sizeof sentry))
171 return -EFAULT;
172 break;
173 case MTRRIOC_GET_ENTRY:
174 case MTRRIOC_GET_PAGE_ENTRY:
175 if (copy_from_user(&gentry, arg, sizeof gentry))
176 return -EFAULT;
177 break;
178#ifdef CONFIG_COMPAT
179 case MTRRIOC32_ADD_ENTRY:
180 case MTRRIOC32_SET_ENTRY:
181 case MTRRIOC32_DEL_ENTRY:
182 case MTRRIOC32_KILL_ENTRY:
183 case MTRRIOC32_ADD_PAGE_ENTRY:
184 case MTRRIOC32_SET_PAGE_ENTRY:
185 case MTRRIOC32_DEL_PAGE_ENTRY:
186 case MTRRIOC32_KILL_PAGE_ENTRY: {
187 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
188 err = get_user(sentry.base, &s32->base);
189 err |= get_user(sentry.size, &s32->size);
190 err |= get_user(sentry.type, &s32->type);
191 if (err)
192 return err;
193 break;
194 }
195 case MTRRIOC32_GET_ENTRY:
196 case MTRRIOC32_GET_PAGE_ENTRY: {
197 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
198 err = get_user(gentry.regnum, &g32->regnum);
199 err |= get_user(gentry.base, &g32->base);
200 err |= get_user(gentry.size, &g32->size);
201 err |= get_user(gentry.type, &g32->type);
202 if (err)
203 return err;
204 break;
205 }
206#endif
207 }
208
209 switch (cmd) {
163 default: 210 default:
164 return -ENOTTY; 211 return -ENOTTY;
165 case MTRRIOC_ADD_ENTRY: 212 case MTRRIOC_ADD_ENTRY:
166 if (!capable(CAP_SYS_ADMIN)) 213 if (!capable(CAP_SYS_ADMIN))
167 return -EPERM; 214 return -EPERM;
168 if (copy_from_user(&sentry, arg, sizeof sentry))
169 return -EFAULT;
170 err = 215 err =
171 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 216 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
172 file, 0); 217 file, 0);
173 if (err < 0)
174 return err;
175 break; 218 break;
176 case MTRRIOC_SET_ENTRY: 219 case MTRRIOC_SET_ENTRY:
177 if (!capable(CAP_SYS_ADMIN)) 220 if (!capable(CAP_SYS_ADMIN))
178 return -EPERM; 221 return -EPERM;
179 if (copy_from_user(&sentry, arg, sizeof sentry))
180 return -EFAULT;
181 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0); 222 err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
182 if (err < 0)
183 return err;
184 break; 223 break;
185 case MTRRIOC_DEL_ENTRY: 224 case MTRRIOC_DEL_ENTRY:
186 if (!capable(CAP_SYS_ADMIN)) 225 if (!capable(CAP_SYS_ADMIN))
187 return -EPERM; 226 return -EPERM;
188 if (copy_from_user(&sentry, arg, sizeof sentry))
189 return -EFAULT;
190 err = mtrr_file_del(sentry.base, sentry.size, file, 0); 227 err = mtrr_file_del(sentry.base, sentry.size, file, 0);
191 if (err < 0)
192 return err;
193 break; 228 break;
194 case MTRRIOC_KILL_ENTRY: 229 case MTRRIOC_KILL_ENTRY:
195 if (!capable(CAP_SYS_ADMIN)) 230 if (!capable(CAP_SYS_ADMIN))
196 return -EPERM; 231 return -EPERM;
197 if (copy_from_user(&sentry, arg, sizeof sentry))
198 return -EFAULT;
199 err = mtrr_del(-1, sentry.base, sentry.size); 232 err = mtrr_del(-1, sentry.base, sentry.size);
200 if (err < 0)
201 return err;
202 break; 233 break;
203 case MTRRIOC_GET_ENTRY: 234 case MTRRIOC_GET_ENTRY:
204 if (copy_from_user(&gentry, arg, sizeof gentry))
205 return -EFAULT;
206 if (gentry.regnum >= num_var_ranges) 235 if (gentry.regnum >= num_var_ranges)
207 return -EINVAL; 236 return -EINVAL;
208 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 237 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
@@ -217,60 +246,59 @@ mtrr_ioctl(struct inode *inode, struct file *file,
217 gentry.type = type; 246 gentry.type = type;
218 } 247 }
219 248
220 if (copy_to_user(arg, &gentry, sizeof gentry))
221 return -EFAULT;
222 break; 249 break;
223 case MTRRIOC_ADD_PAGE_ENTRY: 250 case MTRRIOC_ADD_PAGE_ENTRY:
224 if (!capable(CAP_SYS_ADMIN)) 251 if (!capable(CAP_SYS_ADMIN))
225 return -EPERM; 252 return -EPERM;
226 if (copy_from_user(&sentry, arg, sizeof sentry))
227 return -EFAULT;
228 err = 253 err =
229 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1, 254 mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
230 file, 1); 255 file, 1);
231 if (err < 0)
232 return err;
233 break; 256 break;
234 case MTRRIOC_SET_PAGE_ENTRY: 257 case MTRRIOC_SET_PAGE_ENTRY:
235 if (!capable(CAP_SYS_ADMIN)) 258 if (!capable(CAP_SYS_ADMIN))
236 return -EPERM; 259 return -EPERM;
237 if (copy_from_user(&sentry, arg, sizeof sentry))
238 return -EFAULT;
239 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0); 260 err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
240 if (err < 0)
241 return err;
242 break; 261 break;
243 case MTRRIOC_DEL_PAGE_ENTRY: 262 case MTRRIOC_DEL_PAGE_ENTRY:
244 if (!capable(CAP_SYS_ADMIN)) 263 if (!capable(CAP_SYS_ADMIN))
245 return -EPERM; 264 return -EPERM;
246 if (copy_from_user(&sentry, arg, sizeof sentry))
247 return -EFAULT;
248 err = mtrr_file_del(sentry.base, sentry.size, file, 1); 265 err = mtrr_file_del(sentry.base, sentry.size, file, 1);
249 if (err < 0)
250 return err;
251 break; 266 break;
252 case MTRRIOC_KILL_PAGE_ENTRY: 267 case MTRRIOC_KILL_PAGE_ENTRY:
253 if (!capable(CAP_SYS_ADMIN)) 268 if (!capable(CAP_SYS_ADMIN))
254 return -EPERM; 269 return -EPERM;
255 if (copy_from_user(&sentry, arg, sizeof sentry))
256 return -EFAULT;
257 err = mtrr_del_page(-1, sentry.base, sentry.size); 270 err = mtrr_del_page(-1, sentry.base, sentry.size);
258 if (err < 0)
259 return err;
260 break; 271 break;
261 case MTRRIOC_GET_PAGE_ENTRY: 272 case MTRRIOC_GET_PAGE_ENTRY:
262 if (copy_from_user(&gentry, arg, sizeof gentry))
263 return -EFAULT;
264 if (gentry.regnum >= num_var_ranges) 273 if (gentry.regnum >= num_var_ranges)
265 return -EINVAL; 274 return -EINVAL;
266 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 275 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
267 gentry.type = type; 276 gentry.type = type;
277 break;
278 }
279
280 if (err)
281 return err;
268 282
283 switch(cmd) {
284 case MTRRIOC_GET_ENTRY:
285 case MTRRIOC_GET_PAGE_ENTRY:
269 if (copy_to_user(arg, &gentry, sizeof gentry)) 286 if (copy_to_user(arg, &gentry, sizeof gentry))
270 return -EFAULT; 287 err = -EFAULT;
288 break;
289#ifdef CONFIG_COMPAT
290 case MTRRIOC32_GET_ENTRY:
291 case MTRRIOC32_GET_PAGE_ENTRY: {
292 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
293 err = put_user(gentry.base, &g32->base);
294 err |= put_user(gentry.size, &g32->size);
295 err |= put_user(gentry.regnum, &g32->regnum);
296 err |= put_user(gentry.type, &g32->type);
271 break; 297 break;
272 } 298 }
273 return 0; 299#endif
300 }
301 return err;
274} 302}
275 303
276static int 304static int
@@ -310,7 +338,8 @@ static struct file_operations mtrr_fops = {
310 .read = seq_read, 338 .read = seq_read,
311 .llseek = seq_lseek, 339 .llseek = seq_lseek,
312 .write = mtrr_write, 340 .write = mtrr_write,
313 .ioctl = mtrr_ioctl, 341 .unlocked_ioctl = mtrr_ioctl,
342 .compat_ioctl = mtrr_ioctl,
314 .release = mtrr_close, 343 .release = mtrr_close,
315}; 344};
316 345
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 8bd77d948a84..41b871ecf4b3 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -44,7 +44,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 44 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
45 45
46 /* Intel-defined (#2) */ 46 /* Intel-defined (#2) */
47 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", 47 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 48 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 49 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 50 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c
index 0248e084017c..af809ccf5fbe 100644
--- a/arch/i386/kernel/crash.c
+++ b/arch/i386/kernel/crash.c
@@ -21,7 +21,6 @@
21#include <asm/hardirq.h> 21#include <asm/hardirq.h>
22#include <asm/nmi.h> 22#include <asm/nmi.h>
23#include <asm/hw_irq.h> 23#include <asm/hw_irq.h>
24#include <asm/apic.h>
25#include <mach_ipi.h> 24#include <mach_ipi.h>
26 25
27 26
@@ -148,7 +147,6 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
148 regs = &fixed_regs; 147 regs = &fixed_regs;
149 } 148 }
150 crash_save_this_cpu(regs, cpu); 149 crash_save_this_cpu(regs, cpu);
151 disable_local_APIC();
152 atomic_dec(&waiting_for_crash_ipi); 150 atomic_dec(&waiting_for_crash_ipi);
153 /* Assume hlt works */ 151 /* Assume hlt works */
154 halt(); 152 halt();
@@ -188,7 +186,6 @@ static void nmi_shootdown_cpus(void)
188 } 186 }
189 187
190 /* Leave the nmi callback set */ 188 /* Leave the nmi callback set */
191 disable_local_APIC();
192} 189}
193#else 190#else
194static void nmi_shootdown_cpus(void) 191static void nmi_shootdown_cpus(void)
@@ -213,9 +210,5 @@ void machine_crash_shutdown(struct pt_regs *regs)
213 /* Make a note of crashing cpu. Will be used in NMI callback.*/ 210 /* Make a note of crashing cpu. Will be used in NMI callback.*/
214 crashing_cpu = smp_processor_id(); 211 crashing_cpu = smp_processor_id();
215 nmi_shootdown_cpus(); 212 nmi_shootdown_cpus();
216 lapic_shutdown();
217#if defined(CONFIG_X86_IO_APIC)
218 disable_IO_APIC();
219#endif
220 crash_save_self(regs); 213 crash_save_self(regs);
221} 214}
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 323ef8ab3244..d86f24909284 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -435,4 +435,8 @@ void __init init_IRQ(void)
435 setup_irq(FPU_IRQ, &fpu_irq); 435 setup_irq(FPU_IRQ, &fpu_irq);
436 436
437 irq_ctx_init(smp_processor_id()); 437 irq_ctx_init(smp_processor_id());
438
439#ifdef CONFIG_X86_LOCAL_APIC
440 APIC_init();
441#endif
438} 442}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fb3991e8229e..5a77c52b20a9 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -46,6 +46,9 @@
46int (*ioapic_renumber_irq)(int ioapic, int irq); 46int (*ioapic_renumber_irq)(int ioapic, int irq);
47atomic_t irq_mis_count; 47atomic_t irq_mis_count;
48 48
49/* Where if anywhere is the i8259 connect in external int mode */
50static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
51
49static DEFINE_SPINLOCK(ioapic_lock); 52static DEFINE_SPINLOCK(ioapic_lock);
50 53
51/* 54/*
@@ -738,7 +741,7 @@ static int find_irq_entry(int apic, int pin, int type)
738/* 741/*
739 * Find the pin to which IRQ[irq] (ISA) is connected 742 * Find the pin to which IRQ[irq] (ISA) is connected
740 */ 743 */
741static int find_isa_irq_pin(int irq, int type) 744static int __init find_isa_irq_pin(int irq, int type)
742{ 745{
743 int i; 746 int i;
744 747
@@ -758,6 +761,33 @@ static int find_isa_irq_pin(int irq, int type)
758 return -1; 761 return -1;
759} 762}
760 763
764static int __init find_isa_irq_apic(int irq, int type)
765{
766 int i;
767
768 for (i = 0; i < mp_irq_entries; i++) {
769 int lbus = mp_irqs[i].mpc_srcbus;
770
771 if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
772 mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
773 mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
774 mp_bus_id_to_type[lbus] == MP_BUS_NEC98
775 ) &&
776 (mp_irqs[i].mpc_irqtype == type) &&
777 (mp_irqs[i].mpc_srcbusirq == irq))
778 break;
779 }
780 if (i < mp_irq_entries) {
781 int apic;
782 for(apic = 0; apic < nr_ioapics; apic++) {
783 if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
784 return apic;
785 }
786 }
787
788 return -1;
789}
790
761/* 791/*
762 * Find a specific PCI IRQ entry. 792 * Find a specific PCI IRQ entry.
763 * Not an __init, possibly needed by modules 793 * Not an __init, possibly needed by modules
@@ -1253,7 +1283,7 @@ static void __init setup_IO_APIC_irqs(void)
1253/* 1283/*
1254 * Set up the 8259A-master output pin: 1284 * Set up the 8259A-master output pin:
1255 */ 1285 */
1256static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector) 1286static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
1257{ 1287{
1258 struct IO_APIC_route_entry entry; 1288 struct IO_APIC_route_entry entry;
1259 unsigned long flags; 1289 unsigned long flags;
@@ -1287,8 +1317,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
1287 * Add it to the IO-APIC irq-routing table: 1317 * Add it to the IO-APIC irq-routing table:
1288 */ 1318 */
1289 spin_lock_irqsave(&ioapic_lock, flags); 1319 spin_lock_irqsave(&ioapic_lock, flags);
1290 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1320 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
1291 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1321 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
1292 spin_unlock_irqrestore(&ioapic_lock, flags); 1322 spin_unlock_irqrestore(&ioapic_lock, flags);
1293 1323
1294 enable_8259A_irq(0); 1324 enable_8259A_irq(0);
@@ -1595,7 +1625,8 @@ void /*__init*/ print_PIC(void)
1595static void __init enable_IO_APIC(void) 1625static void __init enable_IO_APIC(void)
1596{ 1626{
1597 union IO_APIC_reg_01 reg_01; 1627 union IO_APIC_reg_01 reg_01;
1598 int i; 1628 int i8259_apic, i8259_pin;
1629 int i, apic;
1599 unsigned long flags; 1630 unsigned long flags;
1600 1631
1601 for (i = 0; i < PIN_MAP_SIZE; i++) { 1632 for (i = 0; i < PIN_MAP_SIZE; i++) {
@@ -1609,11 +1640,52 @@ static void __init enable_IO_APIC(void)
1609 /* 1640 /*
1610 * The number of IO-APIC IRQ registers (== #pins): 1641 * The number of IO-APIC IRQ registers (== #pins):
1611 */ 1642 */
1612 for (i = 0; i < nr_ioapics; i++) { 1643 for (apic = 0; apic < nr_ioapics; apic++) {
1613 spin_lock_irqsave(&ioapic_lock, flags); 1644 spin_lock_irqsave(&ioapic_lock, flags);
1614 reg_01.raw = io_apic_read(i, 1); 1645 reg_01.raw = io_apic_read(apic, 1);
1615 spin_unlock_irqrestore(&ioapic_lock, flags); 1646 spin_unlock_irqrestore(&ioapic_lock, flags);
1616 nr_ioapic_registers[i] = reg_01.bits.entries+1; 1647 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1648 }
1649 for(apic = 0; apic < nr_ioapics; apic++) {
1650 int pin;
1651 /* See if any of the pins is in ExtINT mode */
1652 for(pin = 0; pin < nr_ioapic_registers[i]; pin++) {
1653 struct IO_APIC_route_entry entry;
1654 spin_lock_irqsave(&ioapic_lock, flags);
1655 *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
1656 *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
1657 spin_unlock_irqrestore(&ioapic_lock, flags);
1658
1659
1660 /* If the interrupt line is enabled and in ExtInt mode
1661 * I have found the pin where the i8259 is connected.
1662 */
1663 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1664 ioapic_i8259.apic = apic;
1665 ioapic_i8259.pin = pin;
1666 goto found_i8259;
1667 }
1668 }
1669 }
1670 found_i8259:
1671 /* Look to see what if the MP table has reported the ExtINT */
1672 /* If we could not find the appropriate pin by looking at the ioapic
1673 * the i8259 probably is not connected the ioapic but give the
1674 * mptable a chance anyway.
1675 */
1676 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1677 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1678 /* Trust the MP table if nothing is setup in the hardware */
1679 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1680 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1681 ioapic_i8259.pin = i8259_pin;
1682 ioapic_i8259.apic = i8259_apic;
1683 }
1684 /* Complain if the MP table and the hardware disagree */
1685 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1686 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1687 {
1688 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1617 } 1689 }
1618 1690
1619 /* 1691 /*
@@ -1627,7 +1699,6 @@ static void __init enable_IO_APIC(void)
1627 */ 1699 */
1628void disable_IO_APIC(void) 1700void disable_IO_APIC(void)
1629{ 1701{
1630 int pin;
1631 /* 1702 /*
1632 * Clear the IO-APIC before rebooting: 1703 * Clear the IO-APIC before rebooting:
1633 */ 1704 */
@@ -1638,8 +1709,7 @@ void disable_IO_APIC(void)
1638 * Put that IOAPIC in virtual wire mode 1709 * Put that IOAPIC in virtual wire mode
1639 * so legacy interrupts can be delivered. 1710 * so legacy interrupts can be delivered.
1640 */ 1711 */
1641 pin = find_isa_irq_pin(0, mp_ExtINT); 1712 if (ioapic_i8259.pin != -1) {
1642 if (pin != -1) {
1643 struct IO_APIC_route_entry entry; 1713 struct IO_APIC_route_entry entry;
1644 unsigned long flags; 1714 unsigned long flags;
1645 1715
@@ -1650,7 +1720,7 @@ void disable_IO_APIC(void)
1650 entry.polarity = 0; /* High */ 1720 entry.polarity = 0; /* High */
1651 entry.delivery_status = 0; 1721 entry.delivery_status = 0;
1652 entry.dest_mode = 0; /* Physical */ 1722 entry.dest_mode = 0; /* Physical */
1653 entry.delivery_mode = 7; /* ExtInt */ 1723 entry.delivery_mode = dest_ExtINT; /* ExtInt */
1654 entry.vector = 0; 1724 entry.vector = 0;
1655 entry.dest.physical.physical_dest = 0; 1725 entry.dest.physical.physical_dest = 0;
1656 1726
@@ -1659,11 +1729,13 @@ void disable_IO_APIC(void)
1659 * Add it to the IO-APIC irq-routing table: 1729 * Add it to the IO-APIC irq-routing table:
1660 */ 1730 */
1661 spin_lock_irqsave(&ioapic_lock, flags); 1731 spin_lock_irqsave(&ioapic_lock, flags);
1662 io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1)); 1732 io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
1663 io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0)); 1733 *(((int *)&entry)+1));
1734 io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
1735 *(((int *)&entry)+0));
1664 spin_unlock_irqrestore(&ioapic_lock, flags); 1736 spin_unlock_irqrestore(&ioapic_lock, flags);
1665 } 1737 }
1666 disconnect_bsp_APIC(pin != -1); 1738 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1667} 1739}
1668 1740
1669/* 1741/*
@@ -2113,20 +2185,21 @@ static void setup_nmi (void)
2113 */ 2185 */
2114static inline void unlock_ExtINT_logic(void) 2186static inline void unlock_ExtINT_logic(void)
2115{ 2187{
2116 int pin, i; 2188 int apic, pin, i;
2117 struct IO_APIC_route_entry entry0, entry1; 2189 struct IO_APIC_route_entry entry0, entry1;
2118 unsigned char save_control, save_freq_select; 2190 unsigned char save_control, save_freq_select;
2119 unsigned long flags; 2191 unsigned long flags;
2120 2192
2121 pin = find_isa_irq_pin(8, mp_INT); 2193 pin = find_isa_irq_pin(8, mp_INT);
2194 apic = find_isa_irq_apic(8, mp_INT);
2122 if (pin == -1) 2195 if (pin == -1)
2123 return; 2196 return;
2124 2197
2125 spin_lock_irqsave(&ioapic_lock, flags); 2198 spin_lock_irqsave(&ioapic_lock, flags);
2126 *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin); 2199 *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
2127 *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin); 2200 *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
2128 spin_unlock_irqrestore(&ioapic_lock, flags); 2201 spin_unlock_irqrestore(&ioapic_lock, flags);
2129 clear_IO_APIC_pin(0, pin); 2202 clear_IO_APIC_pin(apic, pin);
2130 2203
2131 memset(&entry1, 0, sizeof(entry1)); 2204 memset(&entry1, 0, sizeof(entry1));
2132 2205
@@ -2139,8 +2212,8 @@ static inline void unlock_ExtINT_logic(void)
2139 entry1.vector = 0; 2212 entry1.vector = 0;
2140 2213
2141 spin_lock_irqsave(&ioapic_lock, flags); 2214 spin_lock_irqsave(&ioapic_lock, flags);
2142 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1)); 2215 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
2143 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0)); 2216 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
2144 spin_unlock_irqrestore(&ioapic_lock, flags); 2217 spin_unlock_irqrestore(&ioapic_lock, flags);
2145 2218
2146 save_control = CMOS_READ(RTC_CONTROL); 2219 save_control = CMOS_READ(RTC_CONTROL);
@@ -2158,11 +2231,11 @@ static inline void unlock_ExtINT_logic(void)
2158 2231
2159 CMOS_WRITE(save_control, RTC_CONTROL); 2232 CMOS_WRITE(save_control, RTC_CONTROL);
2160 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2233 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2161 clear_IO_APIC_pin(0, pin); 2234 clear_IO_APIC_pin(apic, pin);
2162 2235
2163 spin_lock_irqsave(&ioapic_lock, flags); 2236 spin_lock_irqsave(&ioapic_lock, flags);
2164 io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1)); 2237 io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
2165 io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0)); 2238 io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
2166 spin_unlock_irqrestore(&ioapic_lock, flags); 2239 spin_unlock_irqrestore(&ioapic_lock, flags);
2167} 2240}
2168 2241
@@ -2174,7 +2247,7 @@ static inline void unlock_ExtINT_logic(void)
2174 */ 2247 */
2175static inline void check_timer(void) 2248static inline void check_timer(void)
2176{ 2249{
2177 int pin1, pin2; 2250 int apic1, pin1, apic2, pin2;
2178 int vector; 2251 int vector;
2179 2252
2180 /* 2253 /*
@@ -2196,10 +2269,13 @@ static inline void check_timer(void)
2196 timer_ack = 1; 2269 timer_ack = 1;
2197 enable_8259A_irq(0); 2270 enable_8259A_irq(0);
2198 2271
2199 pin1 = find_isa_irq_pin(0, mp_INT); 2272 pin1 = find_isa_irq_pin(0, mp_INT);
2200 pin2 = find_isa_irq_pin(0, mp_ExtINT); 2273 apic1 = find_isa_irq_apic(0, mp_INT);
2274 pin2 = ioapic_i8259.pin;
2275 apic2 = ioapic_i8259.apic;
2201 2276
2202 printk(KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2); 2277 printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
2278 vector, apic1, pin1, apic2, pin2);
2203 2279
2204 if (pin1 != -1) { 2280 if (pin1 != -1) {
2205 /* 2281 /*
@@ -2216,8 +2292,9 @@ static inline void check_timer(void)
2216 clear_IO_APIC_pin(0, pin1); 2292 clear_IO_APIC_pin(0, pin1);
2217 return; 2293 return;
2218 } 2294 }
2219 clear_IO_APIC_pin(0, pin1); 2295 clear_IO_APIC_pin(apic1, pin1);
2220 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n"); 2296 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
2297 "IO-APIC\n");
2221 } 2298 }
2222 2299
2223 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... "); 2300 printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
@@ -2226,13 +2303,13 @@ static inline void check_timer(void)
2226 /* 2303 /*
2227 * legacy devices should be connected to IO APIC #0 2304 * legacy devices should be connected to IO APIC #0
2228 */ 2305 */
2229 setup_ExtINT_IRQ0_pin(pin2, vector); 2306 setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
2230 if (timer_irq_works()) { 2307 if (timer_irq_works()) {
2231 printk("works.\n"); 2308 printk("works.\n");
2232 if (pin1 != -1) 2309 if (pin1 != -1)
2233 replace_pin_at_irq(0, 0, pin1, 0, pin2); 2310 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
2234 else 2311 else
2235 add_pin_to_irq(0, 0, pin2); 2312 add_pin_to_irq(0, apic2, pin2);
2236 if (nmi_watchdog == NMI_IO_APIC) { 2313 if (nmi_watchdog == NMI_IO_APIC) {
2237 setup_nmi(); 2314 setup_nmi();
2238 } 2315 }
@@ -2241,7 +2318,7 @@ static inline void check_timer(void)
2241 /* 2318 /*
2242 * Cleanup, just in case ... 2319 * Cleanup, just in case ...
2243 */ 2320 */
2244 clear_IO_APIC_pin(0, pin2); 2321 clear_IO_APIC_pin(apic2, pin2);
2245 } 2322 }
2246 printk(" failed.\n"); 2323 printk(" failed.\n");
2247 2324
@@ -2310,11 +2387,15 @@ void __init setup_IO_APIC(void)
2310 sync_Arb_IDs(); 2387 sync_Arb_IDs();
2311 setup_IO_APIC_irqs(); 2388 setup_IO_APIC_irqs();
2312 init_IO_APIC_traps(); 2389 init_IO_APIC_traps();
2313 check_timer();
2314 if (!acpi_ioapic) 2390 if (!acpi_ioapic)
2315 print_IO_APIC(); 2391 print_IO_APIC();
2316} 2392}
2317 2393
2394void __init IO_APIC_late_time_init(void)
2395{
2396 check_timer();
2397}
2398
2318/* 2399/*
2319 * Called after all the initialization is done. If we didnt find any 2400 * Called after all the initialization is done. If we didnt find any
2320 * APIC bugs then we can allow the modify fast path 2401 * APIC bugs then we can allow the modify fast path
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index ce66dcc26d90..1a201a932865 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -218,7 +218,7 @@ int show_interrupts(struct seq_file *p, void *v)
218 218
219 if (i == 0) { 219 if (i == 0) {
220 seq_printf(p, " "); 220 seq_printf(p, " ");
221 for_each_cpu(j) 221 for_each_online_cpu(j)
222 seq_printf(p, "CPU%d ",j); 222 seq_printf(p, "CPU%d ",j);
223 seq_putc(p, '\n'); 223 seq_putc(p, '\n');
224 } 224 }
@@ -232,7 +232,7 @@ int show_interrupts(struct seq_file *p, void *v)
232#ifndef CONFIG_SMP 232#ifndef CONFIG_SMP
233 seq_printf(p, "%10u ", kstat_irqs(i)); 233 seq_printf(p, "%10u ", kstat_irqs(i));
234#else 234#else
235 for_each_cpu(j) 235 for_each_online_cpu(j)
236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 236 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
237#endif 237#endif
238 seq_printf(p, " %14s", irq_desc[i].handler->typename); 238 seq_printf(p, " %14s", irq_desc[i].handler->typename);
@@ -246,12 +246,12 @@ skip:
246 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 246 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
247 } else if (i == NR_IRQS) { 247 } else if (i == NR_IRQS) {
248 seq_printf(p, "NMI: "); 248 seq_printf(p, "NMI: ");
249 for_each_cpu(j) 249 for_each_online_cpu(j)
250 seq_printf(p, "%10u ", nmi_count(j)); 250 seq_printf(p, "%10u ", nmi_count(j));
251 seq_putc(p, '\n'); 251 seq_putc(p, '\n');
252#ifdef CONFIG_X86_LOCAL_APIC 252#ifdef CONFIG_X86_LOCAL_APIC
253 seq_printf(p, "LOC: "); 253 seq_printf(p, "LOC: ");
254 for_each_cpu(j) 254 for_each_online_cpu(j)
255 seq_printf(p, "%10u ", 255 seq_printf(p, "%10u ",
256 per_cpu(irq_stat,j).apic_timer_irqs); 256 per_cpu(irq_stat,j).apic_timer_irqs);
257 seq_putc(p, '\n'); 257 seq_putc(p, '\n');
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 27aabfceb67e..8f767d9aa45d 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -69,7 +69,7 @@ unsigned int def_to_bigsmp = 0;
69/* Processor that is doing the boot up */ 69/* Processor that is doing the boot up */
70unsigned int boot_cpu_physical_apicid = -1U; 70unsigned int boot_cpu_physical_apicid = -1U;
71/* Internal processor count */ 71/* Internal processor count */
72static unsigned int __initdata num_processors; 72static unsigned int __devinitdata num_processors;
73 73
74/* Bitmask of physically existing CPUs */ 74/* Bitmask of physically existing CPUs */
75physid_mask_t phys_cpu_present_map; 75physid_mask_t phys_cpu_present_map;
@@ -119,7 +119,7 @@ static int MP_valid_apicid(int apicid, int version)
119} 119}
120#endif 120#endif
121 121
122static void __init MP_processor_info (struct mpc_config_processor *m) 122static void __devinit MP_processor_info (struct mpc_config_processor *m)
123{ 123{
124 int ver, apicid; 124 int ver, apicid;
125 physid_mask_t phys_cpu; 125 physid_mask_t phys_cpu;
@@ -182,17 +182,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
182 boot_cpu_physical_apicid = m->mpc_apicid; 182 boot_cpu_physical_apicid = m->mpc_apicid;
183 } 183 }
184 184
185 if (num_processors >= NR_CPUS) {
186 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
187 " Processor ignored.\n", NR_CPUS);
188 return;
189 }
190
191 if (num_processors >= maxcpus) {
192 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
193 " Processor ignored.\n", maxcpus);
194 return;
195 }
196 ver = m->mpc_apicver; 185 ver = m->mpc_apicver;
197 186
198 if (!MP_valid_apicid(apicid, ver)) { 187 if (!MP_valid_apicid(apicid, ver)) {
@@ -201,11 +190,6 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
201 return; 190 return;
202 } 191 }
203 192
204 cpu_set(num_processors, cpu_possible_map);
205 num_processors++;
206 phys_cpu = apicid_to_cpu_present(apicid);
207 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
208
209 /* 193 /*
210 * Validate version 194 * Validate version
211 */ 195 */
@@ -216,6 +200,25 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
216 ver = 0x10; 200 ver = 0x10;
217 } 201 }
218 apic_version[m->mpc_apicid] = ver; 202 apic_version[m->mpc_apicid] = ver;
203
204 phys_cpu = apicid_to_cpu_present(apicid);
205 physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
206
207 if (num_processors >= NR_CPUS) {
208 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
209 " Processor ignored.\n", NR_CPUS);
210 return;
211 }
212
213 if (num_processors >= maxcpus) {
214 printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
215 " Processor ignored.\n", maxcpus);
216 return;
217 }
218
219 cpu_set(num_processors, cpu_possible_map);
220 num_processors++;
221
219 if ((num_processors > 8) && 222 if ((num_processors > 8) &&
220 APIC_XAPIC(ver) && 223 APIC_XAPIC(ver) &&
221 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) 224 (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
@@ -834,7 +837,7 @@ void __init mp_register_lapic_address (
834} 837}
835 838
836 839
837void __init mp_register_lapic ( 840void __devinit mp_register_lapic (
838 u8 id, 841 u8 id,
839 u8 enabled) 842 u8 enabled)
840{ 843{
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 72515b8a1b12..d661703ac1cb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -100,16 +100,44 @@ int nmi_active;
100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ 100 (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) 101 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
102 102
103#ifdef CONFIG_SMP
104/* The performance counters used by NMI_LOCAL_APIC don't trigger when
105 * the CPU is idle. To make sure the NMI watchdog really ticks on all
106 * CPUs during the test make them busy.
107 */
108static __init void nmi_cpu_busy(void *data)
109{
110 volatile int *endflag = data;
111 local_irq_enable();
112 /* Intentionally don't use cpu_relax here. This is
113 to make sure that the performance counter really ticks,
114 even if there is a simulator or similar that catches the
115 pause instruction. On a real HT machine this is fine because
116 all other CPUs are busy with "useless" delay loops and don't
117 care if they get somewhat less cycles. */
118 while (*endflag == 0)
119 barrier();
120}
121#endif
122
103static int __init check_nmi_watchdog(void) 123static int __init check_nmi_watchdog(void)
104{ 124{
105 unsigned int prev_nmi_count[NR_CPUS]; 125 volatile int endflag = 0;
126 unsigned int *prev_nmi_count;
106 int cpu; 127 int cpu;
107 128
108 if (nmi_watchdog == NMI_NONE) 129 if (nmi_watchdog == NMI_NONE)
109 return 0; 130 return 0;
110 131
132 prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
133 if (!prev_nmi_count)
134 return -1;
135
111 printk(KERN_INFO "Testing NMI watchdog ... "); 136 printk(KERN_INFO "Testing NMI watchdog ... ");
112 137
138 if (nmi_watchdog == NMI_LOCAL_APIC)
139 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
140
113 for (cpu = 0; cpu < NR_CPUS; cpu++) 141 for (cpu = 0; cpu < NR_CPUS; cpu++)
114 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; 142 prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
115 local_irq_enable(); 143 local_irq_enable();
@@ -123,12 +151,18 @@ static int __init check_nmi_watchdog(void)
123 continue; 151 continue;
124#endif 152#endif
125 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) { 153 if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
126 printk("CPU#%d: NMI appears to be stuck!\n", cpu); 154 endflag = 1;
155 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
156 cpu,
157 prev_nmi_count[cpu],
158 nmi_count(cpu));
127 nmi_active = 0; 159 nmi_active = 0;
128 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG; 160 lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
161 kfree(prev_nmi_count);
129 return -1; 162 return -1;
130 } 163 }
131 } 164 }
165 endflag = 1;
132 printk("OK.\n"); 166 printk("OK.\n");
133 167
134 /* now that we know it works we can reduce NMI frequency to 168 /* now that we know it works we can reduce NMI frequency to
@@ -136,6 +170,7 @@ static int __init check_nmi_watchdog(void)
136 if (nmi_watchdog == NMI_LOCAL_APIC) 170 if (nmi_watchdog == NMI_LOCAL_APIC)
137 nmi_hz = 1; 171 nmi_hz = 1;
138 172
173 kfree(prev_nmi_count);
139 return 0; 174 return 0;
140} 175}
141/* This needs to happen later in boot so counters are working */ 176/* This needs to happen later in boot so counters are working */
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 7b6368bf8974..efd11f09c996 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -354,7 +354,7 @@ ptrace_set_thread_area(struct task_struct *child,
354 return 0; 354 return 0;
355} 355}
356 356
357asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 357asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
358{ 358{
359 struct task_struct *child; 359 struct task_struct *child;
360 struct user * dummy = NULL; 360 struct user * dummy = NULL;
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c
index 1b183b378c2c..c9b87330aeea 100644
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -44,7 +44,7 @@ void mach_reboot_fixups(void)
44 44
45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { 45 for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) {
46 cur = &(fixups_table[i]); 46 cur = &(fixups_table[i]);
47 dev = pci_get_device(cur->vendor, cur->device, 0); 47 dev = pci_get_device(cur->vendor, cur->device, NULL);
48 if (!dev) 48 if (!dev)
49 continue; 49 continue;
50 50
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 9b8c8a19824d..b48ac635f3c1 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -389,14 +389,24 @@ static void __init limit_regions(unsigned long long size)
389 } 389 }
390 } 390 }
391 for (i = 0; i < e820.nr_map; i++) { 391 for (i = 0; i < e820.nr_map; i++) {
392 if (e820.map[i].type == E820_RAM) { 392 current_addr = e820.map[i].addr + e820.map[i].size;
393 current_addr = e820.map[i].addr + e820.map[i].size; 393 if (current_addr < size)
394 if (current_addr >= size) { 394 continue;
395 e820.map[i].size -= current_addr-size; 395
396 e820.nr_map = i + 1; 396 if (e820.map[i].type != E820_RAM)
397 return; 397 continue;
398 } 398
399 if (e820.map[i].addr >= size) {
400 /*
401 * This region starts past the end of the
402 * requested size, skip it completely.
403 */
404 e820.nr_map = i;
405 } else {
406 e820.nr_map = i + 1;
407 e820.map[i].size -= current_addr - size;
399 } 408 }
409 return;
400 } 410 }
401} 411}
402 412
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 1fb26d0e30b6..5a2bbe0c4fff 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -87,7 +87,11 @@ EXPORT_SYMBOL(cpu_online_map);
87cpumask_t cpu_callin_map; 87cpumask_t cpu_callin_map;
88cpumask_t cpu_callout_map; 88cpumask_t cpu_callout_map;
89EXPORT_SYMBOL(cpu_callout_map); 89EXPORT_SYMBOL(cpu_callout_map);
90#ifdef CONFIG_HOTPLUG_CPU
91cpumask_t cpu_possible_map = CPU_MASK_ALL;
92#else
90cpumask_t cpu_possible_map; 93cpumask_t cpu_possible_map;
94#endif
91EXPORT_SYMBOL(cpu_possible_map); 95EXPORT_SYMBOL(cpu_possible_map);
92static cpumask_t smp_commenced_mask; 96static cpumask_t smp_commenced_mask;
93 97
@@ -1074,6 +1078,16 @@ void *xquad_portio;
1074EXPORT_SYMBOL(xquad_portio); 1078EXPORT_SYMBOL(xquad_portio);
1075#endif 1079#endif
1076 1080
1081/*
1082 * Fall back to non SMP mode after errors.
1083 *
1084 */
1085static __init void disable_smp(void)
1086{
1087 cpu_set(0, cpu_sibling_map[0]);
1088 cpu_set(0, cpu_core_map[0]);
1089}
1090
1077static void __init smp_boot_cpus(unsigned int max_cpus) 1091static void __init smp_boot_cpus(unsigned int max_cpus)
1078{ 1092{
1079 int apicid, cpu, bit, kicked; 1093 int apicid, cpu, bit, kicked;
@@ -1086,7 +1100,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1086 printk("CPU%d: ", 0); 1100 printk("CPU%d: ", 0);
1087 print_cpu_info(&cpu_data[0]); 1101 print_cpu_info(&cpu_data[0]);
1088 1102
1089 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
1090 boot_cpu_logical_apicid = logical_smp_processor_id(); 1103 boot_cpu_logical_apicid = logical_smp_processor_id();
1091 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; 1104 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
1092 1105
@@ -1098,68 +1111,27 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1098 cpus_clear(cpu_core_map[0]); 1111 cpus_clear(cpu_core_map[0]);
1099 cpu_set(0, cpu_core_map[0]); 1112 cpu_set(0, cpu_core_map[0]);
1100 1113
1114 map_cpu_to_logical_apicid();
1115
1101 /* 1116 /*
1102 * If we couldn't find an SMP configuration at boot time, 1117 * If we couldn't find an SMP configuration at boot time,
1103 * get out of here now! 1118 * get out of here now!
1104 */ 1119 */
1105 if (!smp_found_config && !acpi_lapic) { 1120 if (!smp_found_config && !acpi_lapic) {
1106 printk(KERN_NOTICE "SMP motherboard not detected.\n"); 1121 printk(KERN_NOTICE "SMP motherboard not detected.\n");
1107 smpboot_clear_io_apic_irqs(); 1122 disable_smp();
1108 phys_cpu_present_map = physid_mask_of_physid(0);
1109 if (APIC_init_uniprocessor())
1110 printk(KERN_NOTICE "Local APIC not detected."
1111 " Using dummy APIC emulation.\n");
1112 map_cpu_to_logical_apicid();
1113 cpu_set(0, cpu_sibling_map[0]);
1114 cpu_set(0, cpu_core_map[0]);
1115 return; 1123 return;
1116 } 1124 }
1117 1125
1118 /* 1126 /*
1119 * Should not be necessary because the MP table should list the boot
1120 * CPU too, but we do it for the sake of robustness anyway.
1121 * Makes no sense to do this check in clustered apic mode, so skip it
1122 */
1123 if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
1124 printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
1125 boot_cpu_physical_apicid);
1126 physid_set(hard_smp_processor_id(), phys_cpu_present_map);
1127 }
1128
1129 /*
1130 * If we couldn't find a local APIC, then get out of here now!
1131 */
1132 if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
1133 printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
1134 boot_cpu_physical_apicid);
1135 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1136 smpboot_clear_io_apic_irqs();
1137 phys_cpu_present_map = physid_mask_of_physid(0);
1138 cpu_set(0, cpu_sibling_map[0]);
1139 cpu_set(0, cpu_core_map[0]);
1140 return;
1141 }
1142
1143 verify_local_APIC();
1144
1145 /*
1146 * If SMP should be disabled, then really disable it! 1127 * If SMP should be disabled, then really disable it!
1147 */ 1128 */
1148 if (!max_cpus) { 1129 if (!max_cpus || (enable_local_apic < 0)) {
1149 smp_found_config = 0; 1130 printk(KERN_INFO "SMP mode deactivated.\n");
1150 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n"); 1131 disable_smp();
1151 smpboot_clear_io_apic_irqs();
1152 phys_cpu_present_map = physid_mask_of_physid(0);
1153 cpu_set(0, cpu_sibling_map[0]);
1154 cpu_set(0, cpu_core_map[0]);
1155 return; 1132 return;
1156 } 1133 }
1157 1134
1158 connect_bsp_APIC();
1159 setup_local_APIC();
1160 map_cpu_to_logical_apicid();
1161
1162
1163 setup_portio_remap(); 1135 setup_portio_remap();
1164 1136
1165 /* 1137 /*
@@ -1240,10 +1212,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1240 cpu_set(0, cpu_sibling_map[0]); 1212 cpu_set(0, cpu_sibling_map[0]);
1241 cpu_set(0, cpu_core_map[0]); 1213 cpu_set(0, cpu_core_map[0]);
1242 1214
1243 smpboot_setup_io_apic();
1244
1245 setup_boot_APIC_clock();
1246
1247 /* 1215 /*
1248 * Synchronize the TSC with the AP 1216 * Synchronize the TSC with the AP
1249 */ 1217 */
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 516bf5653b02..8de658db8146 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -327,7 +327,12 @@ int __init get_memcfg_from_srat(void)
327 int tables = 0; 327 int tables = 0;
328 int i = 0; 328 int i = 0;
329 329
330 acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING, rsdp_address); 330 if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
331 rsdp_address))) {
332 printk("%s: System description tables not found\n",
333 __FUNCTION__);
334 goto out_err;
335 }
331 336
332 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) { 337 if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
333 printk("%s: assigning address to rsdp\n", __FUNCTION__); 338 printk("%s: assigning address to rsdp\n", __FUNCTION__);
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 2883a4d4f01f..07471bba2dc6 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -74,10 +74,6 @@ int pit_latch_buggy; /* extern */
74 74
75#include "do_timer.h" 75#include "do_timer.h"
76 76
77u64 jiffies_64 = INITIAL_JIFFIES;
78
79EXPORT_SYMBOL(jiffies_64);
80
81unsigned int cpu_khz; /* Detected as we calibrate the TSC */ 77unsigned int cpu_khz; /* Detected as we calibrate the TSC */
82EXPORT_SYMBOL(cpu_khz); 78EXPORT_SYMBOL(cpu_khz);
83 79
@@ -444,8 +440,8 @@ static int time_init_device(void)
444 440
445device_initcall(time_init_device); 441device_initcall(time_init_device);
446 442
447#ifdef CONFIG_HPET_TIMER
448extern void (*late_time_init)(void); 443extern void (*late_time_init)(void);
444#ifdef CONFIG_HPET_TIMER
449/* Duplicate of time_init() below, with hpet_enable part added */ 445/* Duplicate of time_init() below, with hpet_enable part added */
450static void __init hpet_time_init(void) 446static void __init hpet_time_init(void)
451{ 447{
@@ -462,6 +458,11 @@ static void __init hpet_time_init(void)
462 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 458 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
463 459
464 time_init_hook(); 460 time_init_hook();
461
462#ifdef CONFIG_X86_LOCAL_APIC
463 if (enable_local_apic >= 0)
464 APIC_late_time_init();
465#endif
465} 466}
466#endif 467#endif
467 468
@@ -486,4 +487,9 @@ void __init time_init(void)
486 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name); 487 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
487 488
488 time_init_hook(); 489 time_init_hook();
490
491#ifdef CONFIG_X86_LOCAL_APIC
492 if (enable_local_apic >= 0)
493 late_time_init = APIC_late_time_init;
494#endif
489} 495}
diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c
index 658c0629ba6a..9caeaa315cd7 100644
--- a/arch/i386/kernel/time_hpet.c
+++ b/arch/i386/kernel/time_hpet.c
@@ -275,6 +275,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
275static unsigned long PIE_count; 275static unsigned long PIE_count;
276 276
277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 277static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
278static unsigned int hpet_t1_cmp; /* cached comparator register */
278 279
279/* 280/*
280 * Timer 1 for RTC, we do not use periodic interrupt feature, 281 * Timer 1 for RTC, we do not use periodic interrupt feature,
@@ -306,10 +307,12 @@ int hpet_rtc_timer_init(void)
306 cnt = hpet_readl(HPET_COUNTER); 307 cnt = hpet_readl(HPET_COUNTER);
307 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); 308 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
308 hpet_writel(cnt, HPET_T1_CMP); 309 hpet_writel(cnt, HPET_T1_CMP);
310 hpet_t1_cmp = cnt;
309 local_irq_restore(flags); 311 local_irq_restore(flags);
310 312
311 cfg = hpet_readl(HPET_T1_CFG); 313 cfg = hpet_readl(HPET_T1_CFG);
312 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; 314 cfg &= ~HPET_TN_PERIODIC;
315 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
313 hpet_writel(cfg, HPET_T1_CFG); 316 hpet_writel(cfg, HPET_T1_CFG);
314 317
315 return 1; 318 return 1;
@@ -319,8 +322,12 @@ static void hpet_rtc_timer_reinit(void)
319{ 322{
320 unsigned int cfg, cnt; 323 unsigned int cfg, cnt;
321 324
322 if (!(PIE_on | AIE_on | UIE_on)) 325 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
326 cfg = hpet_readl(HPET_T1_CFG);
327 cfg &= ~HPET_TN_ENABLE;
328 hpet_writel(cfg, HPET_T1_CFG);
323 return; 329 return;
330 }
324 331
325 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) 332 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
326 hpet_rtc_int_freq = PIE_freq; 333 hpet_rtc_int_freq = PIE_freq;
@@ -328,15 +335,10 @@ static void hpet_rtc_timer_reinit(void)
328 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; 335 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
329 336
330 /* It is more accurate to use the comparator value than current count.*/ 337 /* It is more accurate to use the comparator value than current count.*/
331 cnt = hpet_readl(HPET_T1_CMP); 338 cnt = hpet_t1_cmp;
332 cnt += hpet_tick*HZ/hpet_rtc_int_freq; 339 cnt += hpet_tick*HZ/hpet_rtc_int_freq;
333 hpet_writel(cnt, HPET_T1_CMP); 340 hpet_writel(cnt, HPET_T1_CMP);
334 341 hpet_t1_cmp = cnt;
335 cfg = hpet_readl(HPET_T1_CFG);
336 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
337 hpet_writel(cfg, HPET_T1_CFG);
338
339 return;
340} 342}
341 343
342/* 344/*
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index d973a8b681fd..be242723c339 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -30,23 +30,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
30 * basic equation: 30 * basic equation:
31 * ns = cycles / (freq / ns_per_sec) 31 * ns = cycles / (freq / ns_per_sec)
32 * ns = cycles * (ns_per_sec / freq) 32 * ns = cycles * (ns_per_sec / freq)
33 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 33 * ns = cycles * (10^9 / (cpu_khz * 10^3))
34 * ns = cycles * (10^3 / cpu_mhz) 34 * ns = cycles * (10^6 / cpu_khz)
35 * 35 *
36 * Then we use scaling math (suggested by george@mvista.com) to get: 36 * Then we use scaling math (suggested by george@mvista.com) to get:
37 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 37 * ns = cycles * (10^6 * SC / cpu_khz) / SC
38 * ns = cycles * cyc2ns_scale / SC 38 * ns = cycles * cyc2ns_scale / SC
39 * 39 *
40 * And since SC is a constant power of two, we can convert the div 40 * And since SC is a constant power of two, we can convert the div
41 * into a shift. 41 * into a shift.
42 *
43 * We can use khz divisor instead of mhz to keep a better percision, since
44 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
45 * (mathieu.desnoyers@polymtl.ca)
46 *
42 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 47 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
43 */ 48 */
44static unsigned long cyc2ns_scale; 49static unsigned long cyc2ns_scale;
45#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
46 51
47static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 52static inline void set_cyc2ns_scale(unsigned long cpu_khz)
48{ 53{
49 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
50} 55}
51 56
52static inline unsigned long long cycles_2_ns(unsigned long long cyc) 57static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -163,7 +168,7 @@ static int __init init_hpet(char* override)
163 printk("Detected %u.%03u MHz processor.\n", 168 printk("Detected %u.%03u MHz processor.\n",
164 cpu_khz / 1000, cpu_khz % 1000); 169 cpu_khz / 1000, cpu_khz % 1000);
165 } 170 }
166 set_cyc2ns_scale(cpu_khz/1000); 171 set_cyc2ns_scale(cpu_khz);
167 } 172 }
168 /* set this only when cpu_has_tsc */ 173 /* set this only when cpu_has_tsc */
169 timer_hpet.read_timer = read_timer_tsc; 174 timer_hpet.read_timer = read_timer_tsc;
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 6dd470cc9f72..d395e3b42485 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -49,23 +49,28 @@ static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
49 * basic equation: 49 * basic equation:
50 * ns = cycles / (freq / ns_per_sec) 50 * ns = cycles / (freq / ns_per_sec)
51 * ns = cycles * (ns_per_sec / freq) 51 * ns = cycles * (ns_per_sec / freq)
52 * ns = cycles * (10^9 / (cpu_mhz * 10^6)) 52 * ns = cycles * (10^9 / (cpu_khz * 10^3))
53 * ns = cycles * (10^3 / cpu_mhz) 53 * ns = cycles * (10^6 / cpu_khz)
54 * 54 *
55 * Then we use scaling math (suggested by george@mvista.com) to get: 55 * Then we use scaling math (suggested by george@mvista.com) to get:
56 * ns = cycles * (10^3 * SC / cpu_mhz) / SC 56 * ns = cycles * (10^6 * SC / cpu_khz) / SC
57 * ns = cycles * cyc2ns_scale / SC 57 * ns = cycles * cyc2ns_scale / SC
58 * 58 *
59 * And since SC is a constant power of two, we can convert the div 59 * And since SC is a constant power of two, we can convert the div
60 * into a shift. 60 * into a shift.
61 *
62 * We can use khz divisor instead of mhz to keep a better percision, since
63 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
64 * (mathieu.desnoyers@polymtl.ca)
65 *
61 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 66 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
62 */ 67 */
63static unsigned long cyc2ns_scale; 68static unsigned long cyc2ns_scale;
64#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 69#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
65 70
66static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 71static inline void set_cyc2ns_scale(unsigned long cpu_khz)
67{ 72{
68 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 73 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
69} 74}
70 75
71static inline unsigned long long cycles_2_ns(unsigned long long cyc) 76static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -286,7 +291,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
286 if (use_tsc) { 291 if (use_tsc) {
287 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) { 292 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
288 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq); 293 fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
289 set_cyc2ns_scale(cpu_khz/1000); 294 set_cyc2ns_scale(cpu_khz);
290 } 295 }
291 } 296 }
292#endif 297#endif
@@ -536,7 +541,7 @@ static int __init init_tsc(char* override)
536 printk("Detected %u.%03u MHz processor.\n", 541 printk("Detected %u.%03u MHz processor.\n",
537 cpu_khz / 1000, cpu_khz % 1000); 542 cpu_khz / 1000, cpu_khz % 1000);
538 } 543 }
539 set_cyc2ns_scale(cpu_khz/1000); 544 set_cyc2ns_scale(cpu_khz);
540 return 0; 545 return 0;
541 } 546 }
542 } 547 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 19e90bdd84ea..c34d1bfc5161 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -488,6 +488,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
488 tss->io_bitmap_max - thread->io_bitmap_max); 488 tss->io_bitmap_max - thread->io_bitmap_max);
489 tss->io_bitmap_max = thread->io_bitmap_max; 489 tss->io_bitmap_max = thread->io_bitmap_max;
490 tss->io_bitmap_base = IO_BITMAP_OFFSET; 490 tss->io_bitmap_base = IO_BITMAP_OFFSET;
491 tss->io_bitmap_owner = thread;
491 put_cpu(); 492 put_cpu();
492 return; 493 return;
493 } 494 }
diff --git a/arch/i386/mach-es7000/es7000.h b/arch/i386/mach-es7000/es7000.h
index 898ed905e119..f1e3204f5dec 100644
--- a/arch/i386/mach-es7000/es7000.h
+++ b/arch/i386/mach-es7000/es7000.h
@@ -24,6 +24,15 @@
24 * http://www.unisys.com 24 * http://www.unisys.com
25 */ 25 */
26 26
27/*
28 * ES7000 chipsets
29 */
30
31#define NON_UNISYS 0
32#define ES7000_CLASSIC 1
33#define ES7000_ZORRO 2
34
35
27#define MIP_REG 1 36#define MIP_REG 1
28#define MIP_PSAI_REG 4 37#define MIP_PSAI_REG 4
29 38
@@ -106,6 +115,6 @@ struct mip_reg {
106 115
107extern int parse_unisys_oem (char *oemptr); 116extern int parse_unisys_oem (char *oemptr);
108extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); 117extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
109extern void setup_unisys (); 118extern void setup_unisys(void);
110extern int es7000_start_cpu(int cpu, unsigned long eip); 119extern int es7000_start_cpu(int cpu, unsigned long eip);
111extern void es7000_sw_apic(void); 120extern void es7000_sw_apic(void);
diff --git a/arch/i386/mach-es7000/es7000plat.c b/arch/i386/mach-es7000/es7000plat.c
index dc6660511b07..a9ab0644f403 100644
--- a/arch/i386/mach-es7000/es7000plat.c
+++ b/arch/i386/mach-es7000/es7000plat.c
@@ -62,6 +62,9 @@ static unsigned int base;
62static int 62static int
63es7000_rename_gsi(int ioapic, int gsi) 63es7000_rename_gsi(int ioapic, int gsi)
64{ 64{
65 if (es7000_plat == ES7000_ZORRO)
66 return gsi;
67
65 if (!base) { 68 if (!base) {
66 int i; 69 int i;
67 for (i = 0; i < nr_ioapics; i++) 70 for (i = 0; i < nr_ioapics; i++)
@@ -76,7 +79,7 @@ es7000_rename_gsi(int ioapic, int gsi)
76#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */ 79#endif /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
77 80
78void __init 81void __init
79setup_unisys () 82setup_unisys(void)
80{ 83{
81 /* 84 /*
82 * Determine the generation of the ES7000 currently running. 85 * Determine the generation of the ES7000 currently running.
@@ -86,9 +89,9 @@ setup_unisys ()
86 * 89 *
87 */ 90 */
88 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2)) 91 if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
89 es7000_plat = 2; 92 es7000_plat = ES7000_ZORRO;
90 else 93 else
91 es7000_plat = 1; 94 es7000_plat = ES7000_CLASSIC;
92 ioapic_renumber_irq = es7000_rename_gsi; 95 ioapic_renumber_irq = es7000_rename_gsi;
93} 96}
94 97
@@ -151,7 +154,7 @@ parse_unisys_oem (char *oemptr)
151 } 154 }
152 155
153 if (success < 2) { 156 if (success < 2) {
154 es7000_plat = 0; 157 es7000_plat = NON_UNISYS;
155 } else 158 } else
156 setup_unisys(); 159 setup_unisys();
157 return es7000_plat; 160 return es7000_plat;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 9edd4485b91e..cf572d9a3b6e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
108 desc = (void *)desc + (seg & ~7); 108 desc = (void *)desc + (seg & ~7);
109 } else { 109 } else {
110 /* Must disable preemption while reading the GDT. */ 110 /* Must disable preemption while reading the GDT. */
111 desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu()); 111 desc = (u32 *)get_cpu_gdt_table(get_cpu());
112 desc = (void *)desc + (seg & ~7); 112 desc = (void *)desc + (seg & ~7);
113 } 113 }
114 114
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index cddafe33ff7c..19e6f4871d1e 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -547,31 +547,48 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
547 return 0; 547 return 0;
548} 548}
549 549
550static __init int via_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) 550static __init int via_router_probe(struct irq_router *r,
551 struct pci_dev *router, u16 device)
551{ 552{
552 /* FIXME: We should move some of the quirk fixup stuff here */ 553 /* FIXME: We should move some of the quirk fixup stuff here */
553 554
554 if (router->device == PCI_DEVICE_ID_VIA_82C686 && 555 /*
555 device == PCI_DEVICE_ID_VIA_82C586_0) { 556 * work arounds for some buggy BIOSes
556 /* Asus k7m bios wrongly reports 82C686A as 586-compatible */ 557 */
557 device = PCI_DEVICE_ID_VIA_82C686; 558 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
559 switch(router->device) {
560 case PCI_DEVICE_ID_VIA_82C686:
561 /*
562 * Asus k7m bios wrongly reports 82C686A
563 * as 586-compatible
564 */
565 device = PCI_DEVICE_ID_VIA_82C686;
566 break;
567 case PCI_DEVICE_ID_VIA_8235:
568 /**
569 * Asus a7v-x bios wrongly reports 8235
570 * as 586-compatible
571 */
572 device = PCI_DEVICE_ID_VIA_8235;
573 break;
574 }
558 } 575 }
559 576
560 switch(device) 577 switch(device) {
561 { 578 case PCI_DEVICE_ID_VIA_82C586_0:
562 case PCI_DEVICE_ID_VIA_82C586_0: 579 r->name = "VIA";
563 r->name = "VIA"; 580 r->get = pirq_via586_get;
564 r->get = pirq_via586_get; 581 r->set = pirq_via586_set;
565 r->set = pirq_via586_set; 582 return 1;
566 return 1; 583 case PCI_DEVICE_ID_VIA_82C596:
567 case PCI_DEVICE_ID_VIA_82C596: 584 case PCI_DEVICE_ID_VIA_82C686:
568 case PCI_DEVICE_ID_VIA_82C686: 585 case PCI_DEVICE_ID_VIA_8231:
569 case PCI_DEVICE_ID_VIA_8231: 586 case PCI_DEVICE_ID_VIA_8235:
570 /* FIXME: add new ones for 8233/5 */ 587 /* FIXME: add new ones for 8233/5 */
571 r->name = "VIA"; 588 r->name = "VIA";
572 r->get = pirq_via_get; 589 r->get = pirq_via_get;
573 r->set = pirq_via_set; 590 r->set = pirq_via_set;
574 return 1; 591 return 1;
575 } 592 }
576 return 0; 593 return 0;
577} 594}
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index b27c5acc79d0..1f1572692e0b 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -51,16 +51,14 @@ void save_processor_state(void)
51 __save_processor_state(&saved_context); 51 __save_processor_state(&saved_context);
52} 52}
53 53
54static void 54static void do_fpu_end(void)
55do_fpu_end(void)
56{ 55{
57 /* restore FPU regs if necessary */ 56 /*
58 /* Do it out of line so that gcc does not move cr0 load to some stupid place */ 57 * Restore FPU regs if necessary.
59 kernel_fpu_end(); 58 */
60 mxcsr_feature_mask_init(); 59 kernel_fpu_end();
61} 60}
62 61
63
64static void fix_processor_context(void) 62static void fix_processor_context(void)
65{ 63{
66 int cpu = smp_processor_id(); 64 int cpu = smp_processor_id();
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 3fa67ecebc83..dc282710421a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -36,6 +36,7 @@
36#include <linux/uio.h> 36#include <linux/uio.h>
37#include <linux/nfs_fs.h> 37#include <linux/nfs_fs.h>
38#include <linux/quota.h> 38#include <linux/quota.h>
39#include <linux/syscalls.h>
39#include <linux/sunrpc/svc.h> 40#include <linux/sunrpc/svc.h>
40#include <linux/nfsd/nfsd.h> 41#include <linux/nfsd/nfsd.h>
41#include <linux/nfsd/cache.h> 42#include <linux/nfsd/cache.h>
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 768c7e46957c..6ade3790ce07 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -2,6 +2,7 @@
2#include <linux/smp.h> 2#include <linux/smp.h>
3#include <linux/time.h> 3#include <linux/time.h>
4#include <linux/errno.h> 4#include <linux/errno.h>
5#include <linux/timex.h>
5#include <asm/io.h> 6#include <asm/io.h>
6 7
7/* IBM Summit (EXA) Cyclone counter code*/ 8/* IBM Summit (EXA) Cyclone counter code*/
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 8b8a5a45b621..5b7e736f3b49 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,10 +32,6 @@
32 32
33extern unsigned long wall_jiffies; 33extern unsigned long wall_jiffies;
34 34
35u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
36
37EXPORT_SYMBOL(jiffies_64);
38
39#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */ 35#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */
40 36
41#ifdef CONFIG_IA64_DEBUG_IRQ 37#ifdef CONFIG_IA64_DEBUG_IRQ
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 85920fb8d08c..396c94218cc2 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -653,8 +653,6 @@ ENTRY(rie_handler)
653 SAVE_ALL 653 SAVE_ALL
654 mvfc r0, bpc 654 mvfc r0, bpc
655 ld r1, @r0 655 ld r1, @r0
656 seth r0, #0xa0f0
657 st r1, @r0
658 ldi r1, #0x20 ; error_code 656 ldi r1, #0x20 ; error_code
659 mv r0, sp ; pt_regs 657 mv r0, sp ; pt_regs
660 bl do_rie_handler 658 bl do_rie_handler
diff --git a/arch/m32r/kernel/io_m32700ut.c b/arch/m32r/kernel/io_m32700ut.c
index e545b065f7e9..eda9f963c1eb 100644
--- a/arch/m32r/kernel/io_m32700ut.c
+++ b/arch/m32r/kernel/io_m32700ut.c
@@ -64,11 +64,11 @@ static inline void *__port2addr_ata(unsigned long port)
64 * from 0x10000000 to 0x13ffffff on physical address. 64 * from 0x10000000 to 0x13ffffff on physical address.
65 * The base address of LAN controller(LAN91C111) is 0x300. 65 * The base address of LAN controller(LAN91C111) is 0x300.
66 */ 66 */
67#define LAN_IOSTART 0x300 67#define LAN_IOSTART 0xa0000300
68#define LAN_IOEND 0x320 68#define LAN_IOEND 0xa0000320
69static inline void *_port2addr_ne(unsigned long port) 69static inline void *_port2addr_ne(unsigned long port)
70{ 70{
71 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 71 return (void *)(port + 0x10000000);
72} 72}
73static inline void *_port2addr_usb(unsigned long port) 73static inline void *_port2addr_usb(unsigned long port)
74{ 74{
diff --git a/arch/m32r/kernel/io_mappi.c b/arch/m32r/kernel/io_mappi.c
index 78033165fb5c..3c3da042fbd1 100644
--- a/arch/m32r/kernel/io_mappi.c
+++ b/arch/m32r/kernel/io_mappi.c
@@ -31,7 +31,7 @@ extern void pcc_iowrite(int, unsigned long, void *, size_t, size_t, int);
31 31
32static inline void *_port2addr(unsigned long port) 32static inline void *_port2addr(unsigned long port)
33{ 33{
34 return (void *)(port + NONCACHE_OFFSET); 34 return (void *)(port | (NONCACHE_OFFSET));
35} 35}
36 36
37static inline void *_port2addr_ne(unsigned long port) 37static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi2.c b/arch/m32r/kernel/io_mappi2.c
index 5c03504bf653..df3c729cb3e0 100644
--- a/arch/m32r/kernel/io_mappi2.c
+++ b/arch/m32r/kernel/io_mappi2.c
@@ -33,12 +33,9 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
33 33
34static inline void *_port2addr(unsigned long port) 34static inline void *_port2addr(unsigned long port)
35{ 35{
36 return (void *)(port + NONCACHE_OFFSET); 36 return (void *)(port | (NONCACHE_OFFSET));
37} 37}
38 38
39#define LAN_IOSTART 0x300
40#define LAN_IOEND 0x320
41
42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
43static inline void *__port2addr_ata(unsigned long port) 40static inline void *__port2addr_ata(unsigned long port)
44{ 41{
@@ -59,15 +56,17 @@ static inline void *__port2addr_ata(unsigned long port)
59} 56}
60#endif 57#endif
61 58
59#define LAN_IOSTART 0xa0000300
60#define LAN_IOEND 0xa0000320
62#ifdef CONFIG_CHIP_OPSP 61#ifdef CONFIG_CHIP_OPSP
63static inline void *_port2addr_ne(unsigned long port) 62static inline void *_port2addr_ne(unsigned long port)
64{ 63{
65 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 64 return (void *)(port + 0x10000000);
66} 65}
67#else 66#else
68static inline void *_port2addr_ne(unsigned long port) 67static inline void *_port2addr_ne(unsigned long port)
69{ 68{
70 return (void *)(port + NONCACHE_OFFSET + 0x04000000); 69 return (void *)(port + 0x04000000);
71} 70}
72#endif 71#endif
73static inline void *_port2addr_usb(unsigned long port) 72static inline void *_port2addr_usb(unsigned long port)
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index c80bde657854..6716ffea769a 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -36,9 +36,6 @@ static inline void *_port2addr(unsigned long port)
36 return (void *)(port + NONCACHE_OFFSET); 36 return (void *)(port + NONCACHE_OFFSET);
37} 37}
38 38
39#define LAN_IOSTART 0x300
40#define LAN_IOEND 0x320
41
42#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC) 39#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
43static inline void *__port2addr_ata(unsigned long port) 40static inline void *__port2addr_ata(unsigned long port)
44{ 41{
@@ -59,9 +56,11 @@ static inline void *__port2addr_ata(unsigned long port)
59} 56}
60#endif 57#endif
61 58
59#define LAN_IOSTART 0xa0000300
60#define LAN_IOEND 0xa0000320
62static inline void *_port2addr_ne(unsigned long port) 61static inline void *_port2addr_ne(unsigned long port)
63{ 62{
64 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 63 return (void *)(port + 0x10000000);
65} 64}
66 65
67static inline void *_port2addr_usb(unsigned long port) 66static inline void *_port2addr_usb(unsigned long port)
diff --git a/arch/m32r/kernel/io_oaks32r.c b/arch/m32r/kernel/io_oaks32r.c
index 9997dddd24d7..8be323931e4a 100644
--- a/arch/m32r/kernel/io_oaks32r.c
+++ b/arch/m32r/kernel/io_oaks32r.c
@@ -16,7 +16,7 @@
16 16
17static inline void *_port2addr(unsigned long port) 17static inline void *_port2addr(unsigned long port)
18{ 18{
19 return (void *)(port + NONCACHE_OFFSET); 19 return (void *)(port | (NONCACHE_OFFSET));
20} 20}
21 21
22static inline void *_port2addr_ne(unsigned long port) 22static inline void *_port2addr_ne(unsigned long port)
diff --git a/arch/m32r/kernel/io_opsput.c b/arch/m32r/kernel/io_opsput.c
index e34951e8156f..4793bd18e115 100644
--- a/arch/m32r/kernel/io_opsput.c
+++ b/arch/m32r/kernel/io_opsput.c
@@ -36,7 +36,7 @@ extern void pcc_iowrite_word(int, unsigned long, void *, size_t, size_t, int);
36 36
37static inline void *_port2addr(unsigned long port) 37static inline void *_port2addr(unsigned long port)
38{ 38{
39 return (void *)(port + NONCACHE_OFFSET); 39 return (void *)(port | (NONCACHE_OFFSET));
40} 40}
41 41
42/* 42/*
@@ -44,11 +44,11 @@ static inline void *_port2addr(unsigned long port)
44 * from 0x10000000 to 0x13ffffff on physical address. 44 * from 0x10000000 to 0x13ffffff on physical address.
45 * The base address of LAN controller(LAN91C111) is 0x300. 45 * The base address of LAN controller(LAN91C111) is 0x300.
46 */ 46 */
47#define LAN_IOSTART 0x300 47#define LAN_IOSTART 0xa0000300
48#define LAN_IOEND 0x320 48#define LAN_IOEND 0xa0000320
49static inline void *_port2addr_ne(unsigned long port) 49static inline void *_port2addr_ne(unsigned long port)
50{ 50{
51 return (void *)(port + NONCACHE_OFFSET + 0x10000000); 51 return (void *)(port + 0x10000000);
52} 52}
53static inline void *_port2addr_usb(unsigned long port) 53static inline void *_port2addr_usb(unsigned long port)
54{ 54{
diff --git a/arch/m32r/kernel/io_usrv.c b/arch/m32r/kernel/io_usrv.c
index 9eb161dcc104..39a379af40bc 100644
--- a/arch/m32r/kernel/io_usrv.c
+++ b/arch/m32r/kernel/io_usrv.c
@@ -47,7 +47,7 @@ static inline void *_port2addr(unsigned long port)
47 else if (port >= UART1_IOSTART && port <= UART1_IOEND) 47 else if (port >= UART1_IOSTART && port <= UART1_IOEND)
48 port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART; 48 port = ((port - UART1_IOSTART) << 1) + UART1_REGSTART;
49#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */ 49#endif /* CONFIG_SERIAL_8250 || CONFIG_SERIAL_8250_MODULE */
50 return (void *)(port + NONCACHE_OFFSET); 50 return (void *)(port | (NONCACHE_OFFSET));
51} 51}
52 52
53static inline void delay(void) 53static inline void delay(void)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 124f7c1b775e..078d2a0e71c2 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -756,7 +756,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
756 return ret; 756 return ret;
757} 757}
758 758
759asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 759asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
760{ 760{
761 struct task_struct *child; 761 struct task_struct *child;
762 int ret; 762 int ret;
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index ec5674727e7f..f722ec8eb021 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -305,19 +305,19 @@ static int show_cpuinfo(struct seq_file *m, void *v)
305 305
306 seq_printf(m, "processor\t: %ld\n", cpu); 306 seq_printf(m, "processor\t: %ld\n", cpu);
307 307
308#ifdef CONFIG_CHIP_VDEC2 308#if defined(CONFIG_CHIP_VDEC2)
309 seq_printf(m, "cpu family\t: VDEC2\n" 309 seq_printf(m, "cpu family\t: VDEC2\n"
310 "cache size\t: Unknown\n"); 310 "cache size\t: Unknown\n");
311#elif CONFIG_CHIP_M32700 311#elif defined(CONFIG_CHIP_M32700)
312 seq_printf(m,"cpu family\t: M32700\n" 312 seq_printf(m,"cpu family\t: M32700\n"
313 "cache size\t: I-8KB/D-8KB\n"); 313 "cache size\t: I-8KB/D-8KB\n");
314#elif CONFIG_CHIP_M32102 314#elif defined(CONFIG_CHIP_M32102)
315 seq_printf(m,"cpu family\t: M32102\n" 315 seq_printf(m,"cpu family\t: M32102\n"
316 "cache size\t: I-8KB\n"); 316 "cache size\t: I-8KB\n");
317#elif CONFIG_CHIP_OPSP 317#elif defined(CONFIG_CHIP_OPSP)
318 seq_printf(m,"cpu family\t: OPSP\n" 318 seq_printf(m,"cpu family\t: OPSP\n"
319 "cache size\t: I-8KB/D-8KB\n"); 319 "cache size\t: I-8KB/D-8KB\n");
320#elif CONFIG_CHIP_MP 320#elif defined(CONFIG_CHIP_MP)
321 seq_printf(m, "cpu family\t: M32R-MP\n" 321 seq_printf(m, "cpu family\t: M32R-MP\n"
322 "cache size\t: I-xxKB/D-xxKB\n"); 322 "cache size\t: I-xxKB/D-xxKB\n");
323#else 323#else
@@ -326,19 +326,19 @@ static int show_cpuinfo(struct seq_file *m, void *v)
326 seq_printf(m, "bogomips\t: %lu.%02lu\n", 326 seq_printf(m, "bogomips\t: %lu.%02lu\n",
327 c->loops_per_jiffy/(500000/HZ), 327 c->loops_per_jiffy/(500000/HZ),
328 (c->loops_per_jiffy/(5000/HZ)) % 100); 328 (c->loops_per_jiffy/(5000/HZ)) % 100);
329#ifdef CONFIG_PLAT_MAPPI 329#if defined(CONFIG_PLAT_MAPPI)
330 seq_printf(m, "Machine\t\t: Mappi Evaluation board\n"); 330 seq_printf(m, "Machine\t\t: Mappi Evaluation board\n");
331#elif CONFIG_PLAT_MAPPI2 331#elif defined(CONFIG_PLAT_MAPPI2)
332 seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n"); 332 seq_printf(m, "Machine\t\t: Mappi-II Evaluation board\n");
333#elif CONFIG_PLAT_MAPPI3 333#elif defined(CONFIG_PLAT_MAPPI3)
334 seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n"); 334 seq_printf(m, "Machine\t\t: Mappi-III Evaluation board\n");
335#elif CONFIG_PLAT_M32700UT 335#elif defined(CONFIG_PLAT_M32700UT)
336 seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n"); 336 seq_printf(m, "Machine\t\t: M32700UT Evaluation board\n");
337#elif CONFIG_PLAT_OPSPUT 337#elif defined(CONFIG_PLAT_OPSPUT)
338 seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n"); 338 seq_printf(m, "Machine\t\t: OPSPUT Evaluation board\n");
339#elif CONFIG_PLAT_USRV 339#elif defined(CONFIG_PLAT_USRV)
340 seq_printf(m, "Machine\t\t: uServer\n"); 340 seq_printf(m, "Machine\t\t: uServer\n");
341#elif CONFIG_PLAT_OAKS32R 341#elif defined(CONFIG_PLAT_OAKS32R)
342 seq_printf(m, "Machine\t\t: OAKS32R\n"); 342 seq_printf(m, "Machine\t\t: OAKS32R\n");
343#else 343#else
344 seq_printf(m, "Machine\t\t: Unknown\n"); 344 seq_printf(m, "Machine\t\t: Unknown\n");
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 539c562cd54d..2ebce2063fea 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -39,10 +39,6 @@ extern void send_IPI_allbutself(int, int);
39extern void smp_local_timer_interrupt(struct pt_regs *); 39extern void smp_local_timer_interrupt(struct pt_regs *);
40#endif 40#endif
41 41
42u64 jiffies_64 = INITIAL_JIFFIES;
43
44EXPORT_SYMBOL(jiffies_64);
45
46extern unsigned long wall_jiffies; 42extern unsigned long wall_jiffies;
47#define TICK_SIZE (tick_nsec / 1000) 43#define TICK_SIZE (tick_nsec / 1000)
48 44
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
index ddb16a83a8ce..3d5f06145854 100644
--- a/arch/m32r/lib/csum_partial_copy.c
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -18,10 +18,10 @@
18 18
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/string.h>
21 22
22#include <net/checksum.h> 23#include <net/checksum.h>
23#include <asm/byteorder.h> 24#include <asm/byteorder.h>
24#include <asm/string.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26 26
27/* 27/*
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8ed1b01a6a87..f7f1d2e5b90b 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -121,7 +121,7 @@ void ptrace_disable(struct task_struct *child)
121 child->thread.work.syscall_trace = 0; 121 child->thread.work.syscall_trace = 0;
122} 122}
123 123
124asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 124asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
125{ 125{
126 struct task_struct *child; 126 struct task_struct *child;
127 unsigned long tmp; 127 unsigned long tmp;
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 4ec95e3cb874..98e4b1adfa29 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -27,10 +27,6 @@
27#include <linux/timex.h> 27#include <linux/timex.h>
28#include <linux/profile.h> 28#include <linux/profile.h>
29 29
30u64 jiffies_64 = INITIAL_JIFFIES;
31
32EXPORT_SYMBOL(jiffies_64);
33
34static inline int set_rtc_mmss(unsigned long nowtime) 30static inline int set_rtc_mmss(unsigned long nowtime)
35{ 31{
36 if (mach_set_clock_mmss) 32 if (mach_set_clock_mmss)
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 9724e1cd82e5..621d7b91ccfe 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -101,7 +101,7 @@ void ptrace_disable(struct task_struct *child)
101 put_reg(child, PT_SR, tmp); 101 put_reg(child, PT_SR, tmp);
102} 102}
103 103
104asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 104asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
105{ 105{
106 struct task_struct *child; 106 struct task_struct *child;
107 int ret; 107 int ret;
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index b17c1ecba966..b9d8abb45430 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -27,10 +27,6 @@
27 27
28#define TICK_SIZE (tick_nsec / 1000) 28#define TICK_SIZE (tick_nsec / 1000)
29 29
30u64 jiffies_64 = INITIAL_JIFFIES;
31
32EXPORT_SYMBOL(jiffies_64);
33
34extern unsigned long wall_jiffies; 30extern unsigned long wall_jiffies;
35 31
36 32
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 7ce34d4aa220..10d3644e3608 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -1077,8 +1077,8 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1077 struct elfhdr elf; 1077 struct elfhdr elf;
1078 off_t offset = 0, dataoff; 1078 off_t offset = 0, dataoff;
1079 int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; 1079 int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
1080 int numnote = 4; 1080 int numnote = 3;
1081 struct memelfnote notes[4]; 1081 struct memelfnote notes[3];
1082 struct elf_prstatus prstatus; /* NT_PRSTATUS */ 1082 struct elf_prstatus prstatus; /* NT_PRSTATUS */
1083 elf_fpregset_t fpu; /* NT_PRFPREG */ 1083 elf_fpregset_t fpu; /* NT_PRFPREG */
1084 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */ 1084 struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
@@ -1211,20 +1211,15 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
1211 } 1211 }
1212 strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname)); 1212 strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
1213 1213
1214 notes[2].name = "CORE";
1215 notes[2].type = NT_TASKSTRUCT;
1216 notes[2].datasz = sizeof(*current);
1217 notes[2].data = current;
1218
1219 /* Try to dump the FPU. */ 1214 /* Try to dump the FPU. */
1220 prstatus.pr_fpvalid = dump_fpu (regs, &fpu); 1215 prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
1221 if (!prstatus.pr_fpvalid) { 1216 if (!prstatus.pr_fpvalid) {
1222 numnote--; 1217 numnote--;
1223 } else { 1218 } else {
1224 notes[3].name = "CORE"; 1219 notes[2].name = "CORE";
1225 notes[3].type = NT_PRFPREG; 1220 notes[2].type = NT_PRFPREG;
1226 notes[3].datasz = sizeof(fpu); 1221 notes[2].datasz = sizeof(fpu);
1227 notes[3].data = &fpu; 1222 notes[2].data = &fpu;
1228 } 1223 }
1229 1224
1230 /* Write notes phdr entry. */ 1225 /* Write notes phdr entry. */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index fcceab8f2e00..f1b0f3e1f95b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,7 +174,7 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
174 return 0; 174 return 0;
175} 175}
176 176
177asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 177asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
178{ 178{
179 struct task_struct *child; 179 struct task_struct *child;
180 int ret; 180 int ret;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index a24651dfaaba..787ed541d442 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -45,10 +45,6 @@
45 45
46#define TICK_SIZE (tick_nsec / 1000) 46#define TICK_SIZE (tick_nsec / 1000)
47 47
48u64 jiffies_64 = INITIAL_JIFFIES;
49
50EXPORT_SYMBOL(jiffies_64);
51
52/* 48/*
53 * forward reference 49 * forward reference
54 */ 50 */
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index e1829a5d3b19..07631a97670b 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -10,6 +10,7 @@
10#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/signal.h> /* for SIGBUS */
13 14
14#include <asm/module.h> 15#include <asm/module.h>
15#include <asm/sn/addrs.h> 16#include <asm/sn/addrs.h>
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
index 8cad8f004f00..0a331104ad56 100644
--- a/arch/parisc/kernel/ioctl32.c
+++ b/arch/parisc/kernel/ioctl32.c
@@ -561,11 +561,6 @@ IOCTL_TABLE_START
561#define DECLARES 561#define DECLARES
562#include "compat_ioctl.c" 562#include "compat_ioctl.c"
563 563
564/* Might be moved to compat_ioctl.h with some ifdefs... */
565COMPATIBLE_IOCTL(TIOCSTART)
566COMPATIBLE_IOCTL(TIOCSTOP)
567COMPATIBLE_IOCTL(TIOCSLTC)
568
569/* PA-specific ioctls */ 564/* PA-specific ioctls */
570COMPATIBLE_IOCTL(PA_PERF_ON) 565COMPATIBLE_IOCTL(PA_PERF_ON)
571COMPATIBLE_IOCTL(PA_PERF_OFF) 566COMPATIBLE_IOCTL(PA_PERF_OFF)
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index f3428e5e86fb..18130c3748f3 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -78,7 +78,7 @@ void ptrace_disable(struct task_struct *child)
78 pa_psw(child)->l = 0; 78 pa_psw(child)->l = 0;
79} 79}
80 80
81long sys_ptrace(long request, pid_t pid, long addr, long data) 81long sys_ptrace(long request, long pid, long addr, long data)
82{ 82{
83 struct task_struct *child; 83 struct task_struct *child;
84 long ret; 84 long ret;
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index bc979e1abdec..cded25680787 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -33,10 +33,6 @@
33 33
34#include <linux/timex.h> 34#include <linux/timex.h>
35 35
36u64 jiffies_64 = INITIAL_JIFFIES;
37
38EXPORT_SYMBOL(jiffies_64);
39
40/* xtime and wall_jiffies keep wall-clock time */ 36/* xtime and wall_jiffies keep wall-clock time */
41extern unsigned long wall_jiffies; 37extern unsigned long wall_jiffies;
42 38
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
new file mode 100644
index 000000000000..967ecf92d6a7
--- /dev/null
+++ b/arch/powerpc/Kconfig
@@ -0,0 +1,900 @@
1# For a description of the syntax of this configuration file,
2# see Documentation/kbuild/kconfig-language.txt.
3#
4
5mainmenu "Linux/PowerPC Kernel Configuration"
6
7config PPC64
8 bool "64-bit kernel"
9 default n
10 help
11 This option selects whether a 32-bit or a 64-bit kernel
12 will be built.
13
14config PPC32
15 bool
16 default y if !PPC64
17
18config 64BIT
19 bool
20 default y if PPC64
21
22config PPC_MERGE
23 def_bool y
24
25config MMU
26 bool
27 default y
28
29config UID16
30 bool
31
32config GENERIC_HARDIRQS
33 bool
34 default y
35
36config RWSEM_GENERIC_SPINLOCK
37 bool
38
39config RWSEM_XCHGADD_ALGORITHM
40 bool
41 default y
42
43config GENERIC_CALIBRATE_DELAY
44 bool
45 default y
46
47config PPC
48 bool
49 default y
50
51config EARLY_PRINTK
52 bool
53 default y if PPC64
54
55config COMPAT
56 bool
57 default y if PPC64
58
59config SYSVIPC_COMPAT
60 bool
61 depends on COMPAT && SYSVIPC
62 default y
63
64# All PPC32s use generic nvram driver through ppc_md
65config GENERIC_NVRAM
66 bool
67 default y if PPC32
68
69config SCHED_NO_NO_OMIT_FRAME_POINTER
70 bool
71 default y
72
73config ARCH_MAY_HAVE_PC_FDC
74 bool
75 default y
76
77menu "Processor support"
78choice
79 prompt "Processor Type"
80 depends on PPC32
81 default 6xx
82
83config 6xx
84 bool "6xx/7xx/74xx"
85 select PPC_FPU
86 help
87 There are four families of PowerPC chips supported. The more common
88 types (601, 603, 604, 740, 750, 7400), the Motorola embedded
89 versions (821, 823, 850, 855, 860, 52xx, 82xx, 83xx), the AMCC
90 embedded versions (403 and 405) and the high end 64 bit Power
91 processors (POWER 3, POWER4, and IBM PPC970 also known as G5).
92
93 Unless you are building a kernel for one of the embedded processor
94 systems, 64 bit IBM RS/6000 or an Apple G5, choose 6xx.
95 Note that the kernel runs in 32-bit mode even on 64-bit chips.
96
97config PPC_52xx
98 bool "Freescale 52xx"
99
100config PPC_82xx
101 bool "Freescale 82xx"
102
103config PPC_83xx
104 bool "Freescale 83xx"
105
106config 40x
107 bool "AMCC 40x"
108
109config 44x
110 bool "AMCC 44x"
111
112config 8xx
113 bool "Freescale 8xx"
114
115config E200
116 bool "Freescale e200"
117
118config E500
119 bool "Freescale e500"
120endchoice
121
122config POWER4_ONLY
123 bool "Optimize for POWER4"
124 depends on PPC64
125 default n
126 ---help---
127 Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
128 The resulting binary will not work on POWER3 or RS64 processors
129 when compiled with binutils 2.15 or later.
130
131config POWER3
132 bool
133 depends on PPC64
134 default y if !POWER4_ONLY
135
136config POWER4
137 depends on PPC64
138 def_bool y
139
140config PPC_FPU
141 bool
142 default y if PPC64
143
144config BOOKE
145 bool
146 depends on E200 || E500
147 default y
148
149config FSL_BOOKE
150 bool
151 depends on E200 || E500
152 default y
153
154config PTE_64BIT
155 bool
156 depends on 44x || E500
157 default y if 44x
158 default y if E500 && PHYS_64BIT
159
160config PHYS_64BIT
161 bool 'Large physical address support' if E500
162 depends on 44x || E500
163 default y if 44x
164 ---help---
165 This option enables kernel support for larger than 32-bit physical
166 addresses. This features is not be available on all e500 cores.
167
168 If in doubt, say N here.
169
170config ALTIVEC
171 bool "AltiVec Support"
172 depends on 6xx || POWER4
173 ---help---
174 This option enables kernel support for the Altivec extensions to the
175 PowerPC processor. The kernel currently supports saving and restoring
176 altivec registers, and turning on the 'altivec enable' bit so user
177 processes can execute altivec instructions.
178
179 This option is only usefully if you have a processor that supports
180 altivec (G4, otherwise known as 74xx series), but does not have
181 any affect on a non-altivec cpu (it does, however add code to the
182 kernel).
183
184 If in doubt, say Y here.
185
186config SPE
187 bool "SPE Support"
188 depends on E200 || E500
189 ---help---
190 This option enables kernel support for the Signal Processing
191 Extensions (SPE) to the PowerPC processor. The kernel currently
192 supports saving and restoring SPE registers, and turning on the
193 'spe enable' bit so user processes can execute SPE instructions.
194
195 This option is only useful if you have a processor that supports
196 SPE (e500, otherwise known as 85xx series), but does not have any
197 effect on a non-spe cpu (it does, however add code to the kernel).
198
199 If in doubt, say Y here.
200
201config PPC_STD_MMU
202 bool
203 depends on 6xx || POWER3 || POWER4 || PPC64
204 default y
205
206config PPC_STD_MMU_32
207 def_bool y
208 depends on PPC_STD_MMU && PPC32
209
210config SMP
211 depends on PPC_STD_MMU
212 bool "Symmetric multi-processing support"
213 ---help---
214 This enables support for systems with more than one CPU. If you have
215 a system with only one CPU, say N. If you have a system with more
216 than one CPU, say Y. Note that the kernel does not currently
217 support SMP machines with 603/603e/603ev or PPC750 ("G3") processors
218 since they have inadequate hardware support for multiprocessor
219 operation.
220
221 If you say N here, the kernel will run on single and multiprocessor
222 machines, but will use only one CPU of a multiprocessor machine. If
223 you say Y here, the kernel will run on single-processor machines.
224 On a single-processor machine, the kernel will run faster if you say
225 N here.
226
227 If you don't know what to do here, say N.
228
229config NR_CPUS
230 int "Maximum number of CPUs (2-32)"
231 range 2 128
232 depends on SMP
233 default "32" if PPC64
234 default "4"
235
236config NOT_COHERENT_CACHE
237 bool
238 depends on 4xx || 8xx || E200
239 default y
240endmenu
241
242source "init/Kconfig"
243
244menu "Platform support"
245 depends on PPC64 || 6xx
246
247choice
248 prompt "Machine type"
249 default PPC_MULTIPLATFORM
250
251config PPC_MULTIPLATFORM
252 bool "Generic desktop/server/laptop"
253 help
254 Select this option if configuring for an IBM pSeries or
255 RS/6000 machine, an Apple machine, or a PReP, CHRP,
256 Maple or Cell-based machine.
257
258config PPC_ISERIES
259 bool "IBM Legacy iSeries"
260 depends on PPC64
261
262config EMBEDDED6xx
263 bool "Embedded 6xx/7xx/7xxx-based board"
264 depends on PPC32
265
266config APUS
267 bool "Amiga-APUS"
268 depends on PPC32 && BROKEN
269 help
270 Select APUS if configuring for a PowerUP Amiga.
271 More information is available at:
272 <http://linux-apus.sourceforge.net/>.
273endchoice
274
275config PPC_PSERIES
276 depends on PPC_MULTIPLATFORM && PPC64
277 bool " IBM pSeries & new (POWER5-based) iSeries"
278 select PPC_I8259
279 select PPC_RTAS
280 select RTAS_ERROR_LOGGING
281 default y
282
283config PPC_CHRP
284 bool " Common Hardware Reference Platform (CHRP) based machines"
285 depends on PPC_MULTIPLATFORM && PPC32
286 select PPC_I8259
287 select PPC_INDIRECT_PCI
288 select PPC_RTAS
289 select PPC_MPC106
290 default y
291
292config PPC_PMAC
293 bool " Apple PowerMac based machines"
294 depends on PPC_MULTIPLATFORM
295 select PPC_INDIRECT_PCI if PPC32
296 select PPC_MPC106 if PPC32
297 default y
298
299config PPC_PMAC64
300 bool
301 depends on PPC_PMAC && POWER4
302 select U3_DART
303 default y
304
305config PPC_PREP
306 bool " PowerPC Reference Platform (PReP) based machines"
307 depends on PPC_MULTIPLATFORM && PPC32
308 select PPC_I8259
309 select PPC_INDIRECT_PCI
310 default y
311
312config PPC_MAPLE
313 depends on PPC_MULTIPLATFORM && PPC64
314 bool " Maple 970FX Evaluation Board"
315 select U3_DART
316 select MPIC_BROKEN_U3
317 default n
318 help
319 This option enables support for the Maple 970FX Evaluation Board.
320	  For more information, refer to <http://www.970eval.com>
321
322config PPC_BPA
323 bool " Broadband Processor Architecture"
324 depends on PPC_MULTIPLATFORM && PPC64
325 select PPC_RTAS
326
327config PPC_OF
328 bool
329 depends on PPC_MULTIPLATFORM # for now
330 default y
331
332config XICS
333 depends on PPC_PSERIES
334 bool
335 default y
336
337config U3_DART
338 bool
339 depends on PPC_MULTIPLATFORM && PPC64
340 default n
341
342config MPIC
343 depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE || PPC_CHRP
344 bool
345 default y
346
347config PPC_RTAS
348 bool
349 default n
350
351config RTAS_ERROR_LOGGING
352 bool
353 depends on PPC_RTAS
354 default n
355
356config MPIC_BROKEN_U3
357 bool
358 depends on PPC_MAPLE
359 default y
360
361config BPA_IIC
362 depends on PPC_BPA
363 bool
364 default y
365
366config IBMVIO
367 depends on PPC_PSERIES || PPC_ISERIES
368 bool
369 default y
370
371config PPC_MPC106
372 bool
373 default n
374
375source "drivers/cpufreq/Kconfig"
376
377config CPU_FREQ_PMAC
378 bool "Support for Apple PowerBooks"
379 depends on CPU_FREQ && ADB_PMU && PPC32
380 select CPU_FREQ_TABLE
381 help
382 This adds support for frequency switching on Apple PowerBooks,
383 this currently includes some models of iBook & Titanium
384 PowerBook.
385
386config PPC601_SYNC_FIX
387 bool "Workarounds for PPC601 bugs"
388 depends on 6xx && (PPC_PREP || PPC_PMAC)
389 help
390 Some versions of the PPC601 (the first PowerPC chip) have bugs which
391 mean that extra synchronization instructions are required near
392 certain instructions, typically those that make major changes to the
393 CPU state. These extra instructions reduce performance slightly.
394 If you say N here, these extra instructions will not be included,
395 resulting in a kernel which will run faster but may not run at all
396 on some systems with the PPC601 chip.
397
398 If in doubt, say Y here.
399
400config TAU
401 bool "Thermal Management Support"
402 depends on 6xx
403 help
404 G3 and G4 processors have an on-chip temperature sensor called the
405 'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
406 temperature within 2-4 degrees Celsius. This option shows the current
407 on-die temperature in /proc/cpuinfo if the cpu supports it.
408
409 Unfortunately, on some chip revisions, this sensor is very inaccurate
410 and in some cases, does not work at all, so don't assume the cpu
411 temp is actually what /proc/cpuinfo says it is.
412
413config TAU_INT
414 bool "Interrupt driven TAU driver (DANGEROUS)"
415 depends on TAU
416 ---help---
417 The TAU supports an interrupt driven mode which causes an interrupt
418 whenever the temperature goes out of range. This is the fastest way
419 to get notified the temp has exceeded a range. With this option off,
420 a timer is used to re-check the temperature periodically.
421
422 However, on some cpus it appears that the TAU interrupt hardware
423 is buggy and can cause a situation which would lead unexplained hard
424 lockups.
425
426 Unless you are extending the TAU driver, or enjoy kernel/hardware
427 debugging, leave this option off.
428
429config TAU_AVERAGE
430 bool "Average high and low temp"
431 depends on TAU
432 ---help---
433 The TAU hardware can compare the temperature to an upper and lower
434 bound. The default behavior is to show both the upper and lower
435 bound in /proc/cpuinfo. If the range is large, the temperature is
436 either changing a lot, or the TAU hardware is broken (likely on some
437 G4's). If the range is small (around 4 degrees), the temperature is
438 relatively stable. If you say Y here, a single temperature value,
439 halfway between the upper and lower bounds, will be reported in
440 /proc/cpuinfo.
441
442 If in doubt, say N here.
443endmenu
444
445source arch/powerpc/platforms/embedded6xx/Kconfig
446source arch/powerpc/platforms/4xx/Kconfig
447source arch/powerpc/platforms/85xx/Kconfig
448source arch/powerpc/platforms/8xx/Kconfig
449
450menu "Kernel options"
451
452config HIGHMEM
453 bool "High memory support"
454 depends on PPC32
455
456source kernel/Kconfig.hz
457source kernel/Kconfig.preempt
458source "fs/Kconfig.binfmt"
459
460# We optimistically allocate largepages from the VM, so make the limit
461# large enough (16MB). This badly named config option is actually
462# max order + 1
463config FORCE_MAX_ZONEORDER
464 int
465 depends on PPC64
466 default "13"
467
468config MATH_EMULATION
469 bool "Math emulation"
470 depends on 4xx || 8xx || E200 || E500
471 ---help---
472 Some PowerPC chips designed for embedded applications do not have
473 a floating-point unit and therefore do not implement the
474 floating-point instructions in the PowerPC instruction set. If you
475 say Y here, the kernel will include code to emulate a floating-point
476 unit, which will allow programs that use floating-point
477 instructions to run.
478
479config IOMMU_VMERGE
480 bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
481 depends on EXPERIMENTAL && PPC64
482 default n
483 help
484 Cause IO segments sent to a device for DMA to be merged virtually
485 by the IOMMU when they happen to have been allocated contiguously.
486 This doesn't add pressure to the IOMMU allocator. However, some
487 drivers don't support getting large merged segments coming back
488 from *_map_sg(). Say Y if you know the drivers you are using are
489 properly handling this case.
490
491config HOTPLUG_CPU
492 bool "Support for enabling/disabling CPUs"
493 depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
494 ---help---
495 Say Y here to be able to disable and re-enable individual
496 CPUs at runtime on SMP machines.
497
498 Say N if you are unsure.
499
500config KEXEC
501 bool "kexec system call (EXPERIMENTAL)"
502 depends on PPC_MULTIPLATFORM && EXPERIMENTAL
503 help
504 kexec is a system call that implements the ability to shutdown your
505 current kernel, and to start another kernel. It is like a reboot
506	  but it is independent of the system firmware.  And like a reboot
507 you can start any kernel with it, not just Linux.
508
509	  The name comes from the similarity to the exec system call.
510
511 It is an ongoing process to be certain the hardware in a machine
512 is properly shutdown, so do not be surprised if this code does not
513 initially work for you. It may help to enable device hotplugging
514 support. As of this writing the exact hardware interface is
515 strongly in flux, so no good recommendation can be made.
516
517config EMBEDDEDBOOT
518 bool
519 depends on 8xx || 8260
520 default y
521
522config PC_KEYBOARD
523 bool "PC PS/2 style Keyboard"
524 depends on 4xx || CPM2
525
526config PPCBUG_NVRAM
527 bool "Enable reading PPCBUG NVRAM during boot" if PPLUS || LOPEC
528 default y if PPC_PREP
529
530config IRQ_ALL_CPUS
531 bool "Distribute interrupts on all CPUs by default"
532 depends on SMP && !MV64360
533 help
534 This option gives the kernel permission to distribute IRQs across
535 multiple CPUs. Saying N here will route all IRQs to the first
536 CPU. Generally saying Y is safe, although some problems have been
537 reported with SMP Power Macintoshes with this option enabled.
538
539source "arch/powerpc/platforms/pseries/Kconfig"
540
541config NUMA
542 bool "NUMA support"
543 depends on PPC64
544 default y if SMP && PPC_PSERIES
545
546config ARCH_SELECT_MEMORY_MODEL
547 def_bool y
548 depends on PPC64
549
550config ARCH_FLATMEM_ENABLE
551 def_bool y
552 depends on PPC64 && !NUMA
553
554config ARCH_DISCONTIGMEM_ENABLE
555 def_bool y
556 depends on SMP && PPC_PSERIES
557
558config ARCH_DISCONTIGMEM_DEFAULT
559 def_bool y
560 depends on ARCH_DISCONTIGMEM_ENABLE
561
562config ARCH_SPARSEMEM_ENABLE
563 def_bool y
564 depends on ARCH_DISCONTIGMEM_ENABLE
565
566source "mm/Kconfig"
567
568config HAVE_ARCH_EARLY_PFN_TO_NID
569 def_bool y
570 depends on NEED_MULTIPLE_NODES
571
572# Some NUMA nodes have memory ranges that span
573# other nodes. Even though a pfn is valid and
574# between a node's start and end pfns, it may not
575# reside on that node.
576#
577# This is a relatively temporary hack that should
578# be able to go away when sparsemem is fully in
579# place
580
581config NODES_SPAN_OTHER_NODES
582 def_bool y
583 depends on NEED_MULTIPLE_NODES
584
585config SCHED_SMT
586 bool "SMT (Hyperthreading) scheduler support"
587 depends on PPC64 && SMP
588	default n
589 help
590 SMT scheduler support improves the CPU scheduler's decision making
591 when dealing with POWER5 cpus at a cost of slightly increased
592 overhead in some places. If unsure say N here.
593
594config PROC_DEVICETREE
595 bool "Support for device tree in /proc"
596 depends on PROC_FS
597 help
598 This option adds a device-tree directory under /proc which contains
599 an image of the device tree that the kernel copies from Open
600 Firmware or other boot firmware. If unsure, say Y here.
601
602source "arch/powerpc/platforms/prep/Kconfig"
603
604config CMDLINE_BOOL
605 bool "Default bootloader kernel arguments"
606 depends on !PPC_ISERIES
607
608config CMDLINE
609 string "Initial kernel command string"
610 depends on CMDLINE_BOOL
611 default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
612 help
613 On some platforms, there is currently no way for the boot loader to
614 pass arguments to the kernel. For these platforms, you can supply
615 some command-line options at build time by entering them here. In
616 most cases you will need to specify the root device here.
617
618if !44x || BROKEN
619source kernel/power/Kconfig
620endif
621
622config SECCOMP
623 bool "Enable seccomp to safely compute untrusted bytecode"
624 depends on PROC_FS
625 default y
626 help
627 This kernel feature is useful for number crunching applications
628 that may need to compute untrusted bytecode during their
629 execution. By using pipes or other transports made available to
630 the process as file descriptors supporting the read/write
631 syscalls, it's possible to isolate those applications in
632 their own address space using seccomp. Once seccomp is
633 enabled via /proc/<pid>/seccomp, it cannot be disabled
634 and the task is only allowed to execute a few safe syscalls
635 defined by each seccomp mode.
636
637 If unsure, say Y. Only embedded should say N here.
638
639endmenu
640
641config ISA_DMA_API
642 bool
643 default y
644
645menu "Bus options"
646
647config ISA
648 bool "Support for ISA-bus hardware"
649 depends on PPC_PREP || PPC_CHRP
650 select PPC_I8259
651 help
652 Find out whether you have ISA slots on your motherboard. ISA is the
653 name of a bus system, i.e. the way the CPU talks to the other stuff
654 inside your box. If you have an Apple machine, say N here; if you
655 have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
656 you have an embedded board, consult your board documentation.
657
658config GENERIC_ISA_DMA
659 bool
660 depends on PPC64 || POWER4 || 6xx && !CPM2
661 default y
662
663config PPC_I8259
664 bool
665 default y if 85xx
666 default n
667
668config PPC_INDIRECT_PCI
669 bool
670 depends on PCI
671 default y if 40x || 44x || 85xx || 83xx
672 default n
673
674config EISA
675 bool
676
677config SBUS
678 bool
679
680# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
681config MCA
682 bool
683
684config PCI
685 bool "PCI support" if 40x || CPM2 || 83xx || 85xx || PPC_MPC52xx || (EMBEDDED && PPC_ISERIES)
686 default y if !40x && !CPM2 && !8xx && !APUS && !83xx && !85xx
687 default PCI_PERMEDIA if !4xx && !CPM2 && !8xx && APUS
688 default PCI_QSPAN if !4xx && !CPM2 && 8xx
689 help
690 Find out whether your system includes a PCI bus. PCI is the name of
691 a bus system, i.e. the way the CPU talks to the other stuff inside
692 your box. If you say Y here, the kernel will include drivers and
693 infrastructure code to support PCI bus devices.
694
695config PCI_DOMAINS
696 bool
697 default PCI
698
699config MPC83xx_PCI2
700	bool " Support for 2nd PCI host controller"
701 depends on PCI && MPC834x
702 default y if MPC834x_SYS
703
704config PCI_QSPAN
705 bool "QSpan PCI"
706 depends on !4xx && !CPM2 && 8xx
707 select PPC_I8259
708 help
709 Say Y here if you have a system based on a Motorola 8xx-series
710 embedded processor with a QSPAN PCI interface, otherwise say N.
711
712config PCI_8260
713 bool
714 depends on PCI && 8260
715 select PPC_INDIRECT_PCI
716 default y
717
718config 8260_PCI9
719 bool " Enable workaround for MPC826x erratum PCI 9"
720 depends on PCI_8260 && !ADS8272
721 default y
722
723choice
724 prompt " IDMA channel for PCI 9 workaround"
725 depends on 8260_PCI9
726
727config 8260_PCI9_IDMA1
728 bool "IDMA1"
729
730config 8260_PCI9_IDMA2
731 bool "IDMA2"
732
733config 8260_PCI9_IDMA3
734 bool "IDMA3"
735
736config 8260_PCI9_IDMA4
737 bool "IDMA4"
738
739endchoice
740
741source "drivers/pci/Kconfig"
742
743source "drivers/pcmcia/Kconfig"
744
745source "drivers/pci/hotplug/Kconfig"
746
747endmenu
748
749menu "Advanced setup"
750 depends on PPC32
751
752config ADVANCED_OPTIONS
753 bool "Prompt for advanced kernel configuration options"
754 help
755 This option will enable prompting for a variety of advanced kernel
756 configuration options. These options can cause the kernel to not
757 work if they are set incorrectly, but can be used to optimize certain
758 aspects of kernel memory management.
759
760 Unless you know what you are doing, say N here.
761
762comment "Default settings for advanced configuration options are used"
763 depends on !ADVANCED_OPTIONS
764
765config HIGHMEM_START_BOOL
766 bool "Set high memory pool address"
767 depends on ADVANCED_OPTIONS && HIGHMEM
768 help
769 This option allows you to set the base address of the kernel virtual
770 area used to map high memory pages. This can be useful in
771 optimizing the layout of kernel virtual memory.
772
773 Say N here unless you know what you are doing.
774
775config HIGHMEM_START
776 hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
777 default "0xfe000000"
778
779config LOWMEM_SIZE_BOOL
780 bool "Set maximum low memory"
781 depends on ADVANCED_OPTIONS
782 help
783 This option allows you to set the maximum amount of memory which
784 will be used as "low memory", that is, memory which the kernel can
785 access directly, without having to set up a kernel virtual mapping.
786 This can be useful in optimizing the layout of kernel virtual
787 memory.
788
789 Say N here unless you know what you are doing.
790
791config LOWMEM_SIZE
792 hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
793 default "0x30000000"
794
795config KERNEL_START_BOOL
796 bool "Set custom kernel base address"
797 depends on ADVANCED_OPTIONS
798 help
799 This option allows you to set the kernel virtual address at which
800 the kernel will map low memory (the kernel image will be linked at
801 this address). This can be useful in optimizing the virtual memory
802 layout of the system.
803
804 Say N here unless you know what you are doing.
805
806config KERNEL_START
807 hex "Virtual address of kernel base" if KERNEL_START_BOOL
808 default "0xc0000000"
809
810config TASK_SIZE_BOOL
811 bool "Set custom user task size"
812 depends on ADVANCED_OPTIONS
813 help
814 This option allows you to set the amount of virtual address space
815 allocated to user tasks. This can be useful in optimizing the
816 virtual memory layout of the system.
817
818 Say N here unless you know what you are doing.
819
820config TASK_SIZE
821 hex "Size of user task space" if TASK_SIZE_BOOL
822 default "0x80000000"
823
824config CONSISTENT_START_BOOL
825 bool "Set custom consistent memory pool address"
826 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
827 help
828 This option allows you to set the base virtual address
829	  of the consistent memory pool.  This pool of virtual
830 memory is used to make consistent memory allocations.
831
832config CONSISTENT_START
833 hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
834 default "0xff100000" if NOT_COHERENT_CACHE
835
836config CONSISTENT_SIZE_BOOL
837 bool "Set custom consistent memory pool size"
838 depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
839 help
840	  This option allows you to set the size of the
841 consistent memory pool. This pool of virtual memory
842 is used to make consistent memory allocations.
843
844config CONSISTENT_SIZE
845 hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
846 default "0x00200000" if NOT_COHERENT_CACHE
847
848config BOOT_LOAD_BOOL
849 bool "Set the boot link/load address"
850 depends on ADVANCED_OPTIONS && !PPC_MULTIPLATFORM
851 help
852 This option allows you to set the initial load address of the zImage
853 or zImage.initrd file. This can be useful if you are on a board
854 which has a small amount of memory.
855
856 Say N here unless you know what you are doing.
857
858config BOOT_LOAD
859 hex "Link/load address for booting" if BOOT_LOAD_BOOL
860 default "0x00400000" if 40x || 8xx || 8260
861 default "0x01000000" if 44x
862 default "0x00800000"
863
864config PIN_TLB
865 bool "Pinned Kernel TLBs (860 ONLY)"
866 depends on ADVANCED_OPTIONS && 8xx
867endmenu
868
869if PPC64
870config KERNEL_START
871 hex
872 default "0xc000000000000000"
873endif
874
875source "net/Kconfig"
876
877source "drivers/Kconfig"
878
879source "fs/Kconfig"
880
881# XXX source "arch/ppc/8xx_io/Kconfig"
882
883# XXX source "arch/ppc/8260_io/Kconfig"
884
885source "arch/powerpc/platforms/iseries/Kconfig"
886
887source "lib/Kconfig"
888
889source "arch/powerpc/oprofile/Kconfig"
890
891source "arch/powerpc/Kconfig.debug"
892
893source "security/Kconfig"
894
895config KEYS_COMPAT
896 bool
897 depends on COMPAT && KEYS
898 default y
899
900source "crypto/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
new file mode 100644
index 000000000000..0baf64ec80d0
--- /dev/null
+++ b/arch/powerpc/Kconfig.debug
@@ -0,0 +1,128 @@
1menu "Kernel hacking"
2
3source "lib/Kconfig.debug"
4
5config DEBUG_STACKOVERFLOW
6 bool "Check for stack overflows"
7 depends on DEBUG_KERNEL && PPC64
8 help
9 This option will cause messages to be printed if free stack space
10 drops below a certain limit.
11
12config KPROBES
13 bool "Kprobes"
14 depends on DEBUG_KERNEL && PPC64
15 help
16 Kprobes allows you to trap at almost any kernel address and
17 execute a callback function. register_kprobe() establishes
18 a probepoint and specifies the callback. Kprobes is useful
19 for kernel debugging, non-intrusive instrumentation and testing.
20 If in doubt, say "N".
21
22config DEBUG_STACK_USAGE
23 bool "Stack utilization instrumentation"
24 depends on DEBUG_KERNEL && PPC64
25 help
26 Enables the display of the minimum amount of free stack which each
27 task has ever had available in the sysrq-T and sysrq-P debug output.
28
29 This option will slow down process creation somewhat.
30
31config DEBUGGER
32 bool "Enable debugger hooks"
33 depends on DEBUG_KERNEL
34 help
35 Include in-kernel hooks for kernel debuggers. Unless you are
36 intending to debug the kernel, say N here.
37
38config KGDB
39 bool "Include kgdb kernel debugger"
40 depends on DEBUGGER && (BROKEN || PPC_GEN550 || 4xx)
41 select DEBUG_INFO
42 help
43 Include in-kernel hooks for kgdb, the Linux kernel source level
44 debugger. See <http://kgdb.sourceforge.net/> for more information.
45 Unless you are intending to debug the kernel, say N here.
46
47choice
48 prompt "Serial Port"
49 depends on KGDB
50 default KGDB_TTYS1
51
52config KGDB_TTYS0
53 bool "ttyS0"
54
55config KGDB_TTYS1
56 bool "ttyS1"
57
58config KGDB_TTYS2
59 bool "ttyS2"
60
61config KGDB_TTYS3
62 bool "ttyS3"
63
64endchoice
65
66config KGDB_CONSOLE
67 bool "Enable serial console thru kgdb port"
68	depends on KGDB && (8xx || CPM2)
69 help
70 If you enable this, all serial console messages will be sent
71 over the gdb stub.
72 If unsure, say N.
73
74config XMON
75 bool "Include xmon kernel debugger"
76 depends on DEBUGGER && !PPC_ISERIES
77 help
78 Include in-kernel hooks for the xmon kernel monitor/debugger.
79 Unless you are intending to debug the kernel, say N here.
80 Make sure to enable also CONFIG_BOOTX_TEXT on Macs. Otherwise
81 nothing will appear on the screen (xmon writes directly to the
82 framebuffer memory).
83 The cmdline option 'xmon' or 'xmon=early' will drop into xmon
84 very early during boot. 'xmon=on' will just enable the xmon
85 debugger hooks. 'xmon=off' will disable the debugger hooks
86 if CONFIG_XMON_DEFAULT is set.
87
88config XMON_DEFAULT
89 bool "Enable xmon by default"
90 depends on XMON
91 help
92 xmon is normally disabled unless booted with 'xmon=on'.
93 Use 'xmon=off' to disable xmon init during runtime.
94
95config IRQSTACKS
96 bool "Use separate kernel stacks when processing interrupts"
97 depends on PPC64
98 help
99 If you say Y here the kernel will use separate kernel stacks
100 for handling hard and soft interrupts. This can help avoid
101 overflowing the process kernel stacks.
102
103config BDI_SWITCH
104 bool "Include BDI-2000 user context switcher"
105 depends on DEBUG_KERNEL && PPC32
106 help
107 Include in-kernel support for the Abatron BDI2000 debugger.
108 Unless you are intending to debug the kernel with one of these
109 machines, say N here.
110
111config BOOTX_TEXT
112 bool "Support for early boot text console (BootX or OpenFirmware only)"
113	depends on PPC_OF && !PPC_ISERIES
114 help
115 Say Y here to see progress messages from the boot firmware in text
116 mode. Requires either BootX or Open Firmware.
117
118config SERIAL_TEXT_DEBUG
119 bool "Support for early boot texts over serial port"
120 depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
121 PPC_GEN550 || PPC_MPC52xx
122
123config PPC_OCP
124 bool
125 depends on IBM_OCP || XILINX_OCP
126 default y
127
128endmenu
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
new file mode 100644
index 000000000000..2f4cce06a7e5
--- /dev/null
+++ b/arch/powerpc/Makefile
@@ -0,0 +1,222 @@
1# This file is included by the global makefile so that you can add your own
2# architecture-specific flags and dependencies. Remember to do have actions
3# for "archclean" and "archdep" for cleaning up and making dependencies for
4# this architecture.
5#
6# This file is subject to the terms and conditions of the GNU General Public
7# License. See the file "COPYING" in the main directory of this archive
8# for more details.
9#
10# Copyright (C) 1994 by Linus Torvalds
11# Changes for PPC by Gary Thomas
12# Rewritten by Cort Dougan and Paul Mackerras
13#
14
15# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
16KERNELLOAD := $(CONFIG_KERNEL_START)
17
18HAS_BIARCH := $(call cc-option-yn, -m32)
19
20ifeq ($(CONFIG_PPC64),y)
21OLDARCH := ppc64
22SZ := 64
23
24# Set default 32 bits cross compilers for vdso and boot wrapper
25CROSS32_COMPILE ?=
26
27CROSS32CC := $(CROSS32_COMPILE)gcc
28CROSS32AS := $(CROSS32_COMPILE)as
29CROSS32LD := $(CROSS32_COMPILE)ld
30CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
31
32ifeq ($(HAS_BIARCH),y)
33ifeq ($(CROSS32_COMPILE),)
34CROSS32CC := $(CC) -m32
35CROSS32AS := $(AS) -a32
36CROSS32LD := $(LD) -m elf32ppc
37CROSS32OBJCOPY := $(OBJCOPY)
38endif
39endif
40
41export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
42
43new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
44
45ifeq ($(new_nm),y)
46NM := $(NM) --synthetic
47endif
48
49else
50OLDARCH := ppc
51SZ := 32
52endif
53
54UTS_MACHINE := $(OLDARCH)
55
56ifeq ($(HAS_BIARCH),y)
57override AS += -a$(SZ)
58override LD += -m elf$(SZ)ppc
59override CC += -m$(SZ)
60endif
61
62LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
63
64# The -Iarch/$(ARCH)/include is temporary while we are merging
65CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
66AFLAGS += -Iarch/$(ARCH)
67CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
68CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc
69CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
70CFLAGS += $(CFLAGS-y)
71CPP = $(CC) -E $(CFLAGS)
72# Temporary hack until we have migrated to asm-powerpc
73LINUXINCLUDE += -Iarch/$(ARCH)/include
74
75CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
76
77ifeq ($(CONFIG_PPC64),y)
78GCC_VERSION := $(call cc-version)
79GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
80
81ifeq ($(CONFIG_POWER4_ONLY),y)
82ifeq ($(CONFIG_ALTIVEC),y)
83ifeq ($(GCC_BROKEN_VEC),y)
84 CFLAGS += $(call cc-option,-mcpu=970)
85else
86 CFLAGS += $(call cc-option,-mcpu=power4)
87endif
88else
89 CFLAGS += $(call cc-option,-mcpu=power4)
90endif
91else
92 CFLAGS += $(call cc-option,-mtune=power4)
93endif
94endif
95
96# No AltiVec instruction when building kernel
97CFLAGS += $(call cc-option,-mno-altivec)
98
99# Enable unit-at-a-time mode when possible. It shrinks the
100# kernel considerably.
101CFLAGS += $(call cc-option,-funit-at-a-time)
102
103ifndef CONFIG_FSL_BOOKE
104CFLAGS += -mstring
105endif
106
107cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
108cpu-as-$(CONFIG_4xx) += -Wa,-m405
109cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
110cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
111cpu-as-$(CONFIG_E500) += -Wa,-me500
112cpu-as-$(CONFIG_E200) += -Wa,-me200
113
114AFLAGS += $(cpu-as-y)
115CFLAGS += $(cpu-as-y)
116
117# Default to the common case.
118KBUILD_DEFCONFIG := common_defconfig
119
120head-y := arch/powerpc/kernel/head_32.o
121head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
122head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
123head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
124head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
125head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
126
127head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
128head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
129
130core-y += arch/powerpc/kernel/ \
131 arch/$(OLDARCH)/kernel/ \
132 arch/powerpc/mm/ \
133 arch/powerpc/lib/ \
134 arch/powerpc/sysdev/ \
135 arch/powerpc/platforms/
136core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
137core-$(CONFIG_XMON) += arch/powerpc/xmon/
138core-$(CONFIG_APUS) += arch/ppc/amiga/
139drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
140drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
141drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
142
143drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
144
145defaultimage-$(CONFIG_PPC32) := uImage zImage
146defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
147defaultimage-$(CONFIG_PPC_PSERIES) := zImage
148KBUILD_IMAGE := $(defaultimage-y)
149all: $(KBUILD_IMAGE)
150
151CPPFLAGS_vmlinux.lds := -Upowerpc
152
153# All the instructions talk about "make bzImage".
154bzImage: zImage
155
156BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
157
158.PHONY: $(BOOT_TARGETS)
159
160boot := arch/$(OLDARCH)/boot
161
162# urk
163ifeq ($(CONFIG_PPC64),y)
164$(BOOT_TARGETS): vmlinux
165 $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
166else
167$(BOOT_TARGETS): vmlinux
168 $(Q)$(MAKE) ARCH=ppc $(build)=$(boot) $@
169endif
170
171uImage: vmlinux
172 $(Q)$(MAKE) ARCH=$(OLDARCH) $(build)=$(boot)/images $(boot)/images/$@
173
174define archhelp
175 @echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
176 @echo ' uImage - Create a bootable image for U-Boot / PPCBoot'
177 @echo ' install - Install kernel using'
178 @echo ' (your) ~/bin/installkernel or'
179 @echo ' (distribution) /sbin/installkernel or'
180 @echo ' install to $$(INSTALL_PATH) and run lilo'
181 @echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
182endef
183
184archclean:
185 $(Q)$(MAKE) $(clean)=$(boot)
186 # Temporary hack until we have migrated to asm-powerpc
187 $(Q)rm -rf arch/$(ARCH)/include
188
189archprepare: checkbin
190
191# Temporary hack until we have migrated to asm-powerpc
192include/asm: arch/$(ARCH)/include/asm
193arch/$(ARCH)/include/asm:
194 $(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
195 $(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
196
197# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
198# to stdout and these checks are run even on install targets.
199TOUT := .tmp_gas_check
200# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
201# instructions.
202# gcc-3.4 and binutils-2.14 are a fatal combination.
203GCC_VERSION := $(call cc-version)
204
205checkbin:
206 @if test "$(GCC_VERSION)" = "0304" ; then \
207 if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
208 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
209 echo 'correctly with gcc-3.4 and your version of binutils.'; \
210 echo '*** Please upgrade your binutils or downgrade your gcc'; \
211 false; \
212 fi ; \
213 fi
214 @if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
215 echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
216 echo 'correctly with old versions of binutils.' ; \
217 echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
218 false ; \
219 fi
220
221CLEAN_FILES += $(TOUT)
222
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
new file mode 100644
index 000000000000..572d4f5eaacb
--- /dev/null
+++ b/arch/powerpc/kernel/Makefile
@@ -0,0 +1,56 @@
1#
2# Makefile for the linux kernel.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8ifeq ($(CONFIG_PPC32),y)
9CFLAGS_prom_init.o += -fPIC
10CFLAGS_btext.o += -fPIC
11endif
12
13obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
14 signal_32.o pmc.o
15obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
16 ptrace32.o systbl.o
17obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
18obj-$(CONFIG_POWER4) += idle_power4.o
19obj-$(CONFIG_PPC_OF) += of_device.o
20obj-$(CONFIG_PPC_RTAS) += rtas.o
21obj-$(CONFIG_IBMVIO) += vio.o
22
23ifeq ($(CONFIG_PPC_MERGE),y)
24
25extra-$(CONFIG_PPC_STD_MMU) := head_32.o
26extra-$(CONFIG_PPC64) := head_64.o
27extra-$(CONFIG_40x) := head_4xx.o
28extra-$(CONFIG_44x) := head_44x.o
29extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
30extra-$(CONFIG_8xx) := head_8xx.o
31extra-y += vmlinux.lds
32
33obj-y += process.o init_task.o time.o \
34 prom.o traps.o setup-common.o
35obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
36obj-$(CONFIG_PPC64) += misc_64.o
37obj-$(CONFIG_PPC_OF) += prom_init.o
38obj-$(CONFIG_MODULES) += ppc_ksyms.o
39obj-$(CONFIG_BOOTX_TEXT) += btext.o
40obj-$(CONFIG_6xx) += idle_6xx.o
41
42ifeq ($(CONFIG_PPC_ISERIES),y)
43$(obj)/head_64.o: $(obj)/lparmap.s
44AFLAGS_head_64.o += -I$(obj)
45endif
46
47else
48# stuff used from here for ARCH=ppc or ARCH=ppc64
49obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
50 setup-common.o
51
52
53endif
54
55extra-$(CONFIG_PPC_FPU) += fpu.o
56extra-$(CONFIG_PPC64) += entry_64.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..330cd783206f
--- /dev/null
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -0,0 +1,273 @@
1/*
2 * This program is used to generate definitions needed by
3 * assembly language modules.
4 *
5 * We use the technique used in the OSF Mach kernel code:
6 * generate asm statements containing #defines,
7 * compile this file to assembler, and then extract the
8 * #defines from the assembly-language output.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/signal.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/types.h>
23#include <linux/mman.h>
24#include <linux/mm.h>
25#ifdef CONFIG_PPC64
26#include <linux/time.h>
27#include <linux/hardirq.h>
28#else
29#include <linux/ptrace.h>
30#include <linux/suspend.h>
31#endif
32
33#include <asm/io.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/processor.h>
37#include <asm/cputable.h>
38#include <asm/thread_info.h>
39#include <asm/rtas.h>
40#ifdef CONFIG_PPC64
41#include <asm/paca.h>
42#include <asm/lppaca.h>
43#include <asm/iSeries/HvLpEvent.h>
44#include <asm/cache.h>
45#include <asm/systemcfg.h>
46#include <asm/compat.h>
47#endif
48
49#define DEFINE(sym, val) \
50 asm volatile("\n->" #sym " %0 " #val : : "i" (val))
51
52#define BLANK() asm volatile("\n->" : : )
53
54int main(void)
55{
56 DEFINE(THREAD, offsetof(struct task_struct, thread));
57 DEFINE(MM, offsetof(struct task_struct, mm));
58#ifdef CONFIG_PPC64
59 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
60#else
61 DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
62 DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
63#endif /* CONFIG_PPC64 */
64
65 DEFINE(KSP, offsetof(struct thread_struct, ksp));
66 DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
67 DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
68 DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
69 DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
70#ifdef CONFIG_ALTIVEC
71 DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
72 DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
73 DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
74 DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
75#endif /* CONFIG_ALTIVEC */
76#ifdef CONFIG_PPC64
77 DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
78#else /* CONFIG_PPC64 */
79 DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
80 DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
81#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
82 DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
83 DEFINE(PT_PTRACED, PT_PTRACED);
84#endif
85#ifdef CONFIG_SPE
86 DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
87 DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
88 DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
89 DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
90#endif /* CONFIG_SPE */
91#endif /* CONFIG_PPC64 */
92
93 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
94 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
95 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
96#ifdef CONFIG_PPC32
97 DEFINE(TI_TASK, offsetof(struct thread_info, task));
98 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
99 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
100#endif /* CONFIG_PPC32 */
101
102#ifdef CONFIG_PPC64
103 DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
104 DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
105 DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
106 DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
107 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
108 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
109 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
110 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
111
112 /* paca */
113 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
114 DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
115 DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
116 DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
117 DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
118 DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
119 DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
120 DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
121 DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
122 DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
123 DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
124 DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
125 DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
126 DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
127 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
128#ifdef CONFIG_HUGETLB_PAGE
129 DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
130 DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
131#endif /* CONFIG_HUGETLB_PAGE */
132 DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
133 DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
134 DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
135 DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
136 DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
137 DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
138 DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
139 DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
140
141 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
142 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
143 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
144 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
145#endif /* CONFIG_PPC64 */
146
147 /* RTAS */
148 DEFINE(RTASBASE, offsetof(struct rtas_t, base));
149 DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
150
151 /* Interrupt register frame */
152 DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
153#ifndef CONFIG_PPC64
154 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
155#else /* CONFIG_PPC64 */
156 DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
157 /* 288 = # of volatile regs, int & fp, for leaf routines */
158 /* which do not stack a frame. See the PPC64 ABI. */
159 DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
160 /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
161 DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
162 DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
163#endif /* CONFIG_PPC64 */
164 DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
165 DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
166 DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
167 DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
168 DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
169 DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
170 DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
171 DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
172 DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
173 DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
174 DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
175 DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
176 DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
177 DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
178#ifndef CONFIG_PPC64
179 DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
180 DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
181 DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
182 DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
183 DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
184 DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
185 DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
186 DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
187 DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
188 DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
189 DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
190 DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
191 DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
192 DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
193 DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
194 DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
195 DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
196 DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
197#endif /* CONFIG_PPC64 */
198 /*
199 * Note: these symbols include _ because they overlap with special
200 * register names
201 */
202 DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
203 DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
204 DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
205 DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
206 DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
207 DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
208 DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
209 DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
210 DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
211 DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
212 DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
213#ifndef CONFIG_PPC64
214 DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
215 /*
216 * The PowerPC 400-class & Book-E processors have neither the DAR
217 * nor the DSISR SPRs. Hence, we overload them to hold the similar
218 * DEAR and ESR SPRs for such processors. For critical interrupts
219 * we use them to hold SRR0 and SRR1.
220 */
221 DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
222 DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
223#else /* CONFIG_PPC64 */
224 DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
225
226 /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
227 DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
228 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
229#endif /* CONFIG_PPC64 */
230
231 DEFINE(CLONE_VM, CLONE_VM);
232 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
233
234#ifndef CONFIG_PPC64
235 DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
236#endif /* ! CONFIG_PPC64 */
237
238 /* About the CPU features table */
239 DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
240 DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
241 DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
242 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
243 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
244
245#ifndef CONFIG_PPC64
246 DEFINE(pbe_address, offsetof(struct pbe, address));
247 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
248 DEFINE(pbe_next, offsetof(struct pbe, next));
249
250 DEFINE(TASK_SIZE, TASK_SIZE);
251 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
252#else /* CONFIG_PPC64 */
253 /* systemcfg offsets for use by vdso */
254 DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
255 DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
256 DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
257 DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
258 DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
259 DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
260 DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
261 DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
262 DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
263
264 /* timeval/timezone offsets for use by vdso */
265 DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
266 DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
267 DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
268 DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
269 DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
270 DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
271#endif /* CONFIG_PPC64 */
272 return 0;
273}
diff --git a/arch/ppc64/kernel/binfmt_elf32.c b/arch/powerpc/kernel/binfmt_elf32.c
index fadc699a0497..8ad6b0f33651 100644
--- a/arch/ppc64/kernel/binfmt_elf32.c
+++ b/arch/powerpc/kernel/binfmt_elf32.c
@@ -70,9 +70,6 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
70 value->tv_sec = jiffies / HZ; 70 value->tv_sec = jiffies / HZ;
71} 71}
72 72
73extern void start_thread32(struct pt_regs *, unsigned long, unsigned long);
74#undef start_thread
75#define start_thread start_thread32
76#define init_elf_binfmt init_elf32_binfmt 73#define init_elf_binfmt init_elf32_binfmt
77 74
78#include "../../../fs/binfmt_elf.c" 75#include "../../../fs/binfmt_elf.c"
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
new file mode 100644
index 000000000000..bdfba92b2b38
--- /dev/null
+++ b/arch/powerpc/kernel/btext.c
@@ -0,0 +1,853 @@
1/*
2 * Procedures for drawing on the screen early on in the boot process.
3 *
4 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 */
6#include <linux/config.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/init.h>
10#include <linux/module.h>
11
12#include <asm/sections.h>
13#include <asm/prom.h>
14#include <asm/btext.h>
15#include <asm/prom.h>
16#include <asm/page.h>
17#include <asm/mmu.h>
18#include <asm/pgtable.h>
19#include <asm/io.h>
20#include <asm/lmb.h>
21#include <asm/processor.h>
22
23#define NO_SCROLL
24
25#ifndef NO_SCROLL
26static void scrollscreen(void);
27#endif
28
29static void draw_byte(unsigned char c, long locX, long locY);
30static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
31static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
32static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
33
34static int g_loc_X;
35static int g_loc_Y;
36static int g_max_loc_X;
37static int g_max_loc_Y;
38
39static int dispDeviceRowBytes;
40static int dispDeviceDepth;
41static int dispDeviceRect[4];
42static unsigned char *dispDeviceBase, *logicalDisplayBase;
43
44unsigned long disp_BAT[2] __initdata = {0, 0};
45
46#define cmapsz (16*256)
47
48static unsigned char vga_font[cmapsz];
49
50int boot_text_mapped;
51int force_printk_to_btext = 0;
52
53#ifdef CONFIG_PPC32
54/* Calc BAT values for mapping the display and store them
55 * in disp_BAT. Those values are then used from head.S to map
56 * the display during identify_machine() and MMU_Init()
57 *
58 * The display is mapped to virtual address 0xD0000000, rather
59 * than 1:1, because some some CHRP machines put the frame buffer
60 * in the region starting at 0xC0000000 (KERNELBASE).
61 * This mapping is temporary and will disappear as soon as the
62 * setup done by MMU_Init() is applied.
63 *
64 * For now, we align the BAT and then map 8Mb on 601 and 16Mb
65 * on other PPCs. This may cause trouble if the framebuffer
66 * is really badly aligned, but I didn't encounter this case
67 * yet.
68 */
69void __init
70btext_prepare_BAT(void)
71{
72 unsigned long vaddr = KERNELBASE + 0x10000000;
73 unsigned long addr;
74 unsigned long lowbits;
75
76 addr = (unsigned long)dispDeviceBase;
77 if (!addr) {
78 boot_text_mapped = 0;
79 return;
80 }
81 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
82 /* 603, 604, G3, G4, ... */
83 lowbits = addr & ~0xFF000000UL;
84 addr &= 0xFF000000UL;
85 disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
86 disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
87 } else {
88 /* 601 */
89 lowbits = addr & ~0xFF800000UL;
90 addr &= 0xFF800000UL;
91 disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
92 disp_BAT[1] = addr | BL_8M | 0x40;
93 }
94 logicalDisplayBase = (void *) (vaddr + lowbits);
95}
96#endif
97
98/* This function will enable the early boot text when doing OF booting. This
99 * way, xmon output should work too
100 */
101void __init
102btext_setup_display(int width, int height, int depth, int pitch,
103 unsigned long address)
104{
105 g_loc_X = 0;
106 g_loc_Y = 0;
107 g_max_loc_X = width / 8;
108 g_max_loc_Y = height / 16;
109 logicalDisplayBase = (unsigned char *)address;
110 dispDeviceBase = (unsigned char *)address;
111 dispDeviceRowBytes = pitch;
112 dispDeviceDepth = depth;
113 dispDeviceRect[0] = dispDeviceRect[1] = 0;
114 dispDeviceRect[2] = width;
115 dispDeviceRect[3] = height;
116 boot_text_mapped = 1;
117}
118
119/* Here's a small text engine to use during early boot
120 * or for debugging purposes
121 *
122 * todo:
123 *
124 * - build some kind of vgacon with it to enable early printk
125 * - move to a separate file
126 * - add a few video driver hooks to keep in sync with display
127 * changes.
128 */
129
130void map_boot_text(void)
131{
132 unsigned long base, offset, size;
133 unsigned char *vbase;
134
135 /* By default, we are no longer mapped */
136 boot_text_mapped = 0;
137 if (dispDeviceBase == 0)
138 return;
139 base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
140 offset = ((unsigned long) dispDeviceBase) - base;
141 size = dispDeviceRowBytes * dispDeviceRect[3] + offset
142 + dispDeviceRect[0];
143 vbase = __ioremap(base, size, _PAGE_NO_CACHE);
144 if (vbase == 0)
145 return;
146 logicalDisplayBase = vbase + offset;
147 boot_text_mapped = 1;
148}
149
150int btext_initialize(struct device_node *np)
151{
152 unsigned int width, height, depth, pitch;
153 unsigned long address = 0;
154 u32 *prop;
155
156 prop = (u32 *)get_property(np, "width", NULL);
157 if (prop == NULL)
158 return -EINVAL;
159 width = *prop;
160 prop = (u32 *)get_property(np, "height", NULL);
161 if (prop == NULL)
162 return -EINVAL;
163 height = *prop;
164 prop = (u32 *)get_property(np, "depth", NULL);
165 if (prop == NULL)
166 return -EINVAL;
167 depth = *prop;
168 pitch = width * ((depth + 7) / 8);
169 prop = (u32 *)get_property(np, "linebytes", NULL);
170 if (prop)
171 pitch = *prop;
172 if (pitch == 1)
173 pitch = 0x1000;
174 prop = (u32 *)get_property(np, "address", NULL);
175 if (prop)
176 address = *prop;
177
178 /* FIXME: Add support for PCI reg properties */
179
180 if (address == 0)
181 return -EINVAL;
182
183 g_loc_X = 0;
184 g_loc_Y = 0;
185 g_max_loc_X = width / 8;
186 g_max_loc_Y = height / 16;
187 logicalDisplayBase = (unsigned char *)address;
188 dispDeviceBase = (unsigned char *)address;
189 dispDeviceRowBytes = pitch;
190 dispDeviceDepth = depth;
191 dispDeviceRect[0] = dispDeviceRect[1] = 0;
192 dispDeviceRect[2] = width;
193 dispDeviceRect[3] = height;
194
195 map_boot_text();
196
197 return 0;
198}
199
200void __init init_boot_display(void)
201{
202 char *name;
203 struct device_node *np = NULL;
204 int rc = -ENODEV;
205
206 printk("trying to initialize btext ...\n");
207
208 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
209 if (name != NULL) {
210 np = of_find_node_by_path(name);
211 if (np != NULL) {
212 if (strcmp(np->type, "display") != 0) {
213 printk("boot stdout isn't a display !\n");
214 of_node_put(np);
215 np = NULL;
216 }
217 }
218 }
219 if (np)
220 rc = btext_initialize(np);
221 if (rc == 0)
222 return;
223
224 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
225 if (get_property(np, "linux,opened", NULL)) {
226 printk("trying %s ...\n", np->full_name);
227 rc = btext_initialize(np);
228 printk("result: %d\n", rc);
229 }
230 if (rc == 0)
231 return;
232 }
233}
234
235/* Calc the base address of a given point (x,y) */
236static unsigned char * calc_base(int x, int y)
237{
238 unsigned char *base;
239
240 base = logicalDisplayBase;
241 if (base == 0)
242 base = dispDeviceBase;
243 base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
244 base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
245 return base;
246}
247
248/* Adjust the display to a new resolution */
249void btext_update_display(unsigned long phys, int width, int height,
250 int depth, int pitch)
251{
252 if (dispDeviceBase == 0)
253 return;
254
255 /* check it's the same frame buffer (within 256MB) */
256 if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
257 return;
258
259 dispDeviceBase = (__u8 *) phys;
260 dispDeviceRect[0] = 0;
261 dispDeviceRect[1] = 0;
262 dispDeviceRect[2] = width;
263 dispDeviceRect[3] = height;
264 dispDeviceDepth = depth;
265 dispDeviceRowBytes = pitch;
266 if (boot_text_mapped) {
267 iounmap(logicalDisplayBase);
268 boot_text_mapped = 0;
269 }
270 map_boot_text();
271 g_loc_X = 0;
272 g_loc_Y = 0;
273 g_max_loc_X = width / 8;
274 g_max_loc_Y = height / 16;
275}
276EXPORT_SYMBOL(btext_update_display);
277
278void btext_clearscreen(void)
279{
280 unsigned long *base = (unsigned long *)calc_base(0, 0);
281 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
282 (dispDeviceDepth >> 3)) >> 3;
283 int i,j;
284
285 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
286 {
287 unsigned long *ptr = base;
288 for(j=width; j; --j)
289 *(ptr++) = 0;
290 base += (dispDeviceRowBytes >> 3);
291 }
292}
293
294#ifndef NO_SCROLL
295static void scrollscreen(void)
296{
297 unsigned long *src = (unsigned long *)calc_base(0,16);
298 unsigned long *dst = (unsigned long *)calc_base(0,0);
299 unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
300 (dispDeviceDepth >> 3)) >> 3;
301 int i,j;
302
303 for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
304 {
305 unsigned long *src_ptr = src;
306 unsigned long *dst_ptr = dst;
307 for(j=width; j; --j)
308 *(dst_ptr++) = *(src_ptr++);
309 src += (dispDeviceRowBytes >> 3);
310 dst += (dispDeviceRowBytes >> 3);
311 }
312 for (i=0; i<16; i++)
313 {
314 unsigned long *dst_ptr = dst;
315 for(j=width; j; --j)
316 *(dst_ptr++) = 0;
317 dst += (dispDeviceRowBytes >> 3);
318 }
319}
320#endif /* ndef NO_SCROLL */
321
322void btext_drawchar(char c)
323{
324 int cline = 0;
325#ifdef NO_SCROLL
326 int x;
327#endif
328 if (!boot_text_mapped)
329 return;
330
331 switch (c) {
332 case '\b':
333 if (g_loc_X > 0)
334 --g_loc_X;
335 break;
336 case '\t':
337 g_loc_X = (g_loc_X & -8) + 8;
338 break;
339 case '\r':
340 g_loc_X = 0;
341 break;
342 case '\n':
343 g_loc_X = 0;
344 g_loc_Y++;
345 cline = 1;
346 break;
347 default:
348 draw_byte(c, g_loc_X++, g_loc_Y);
349 }
350 if (g_loc_X >= g_max_loc_X) {
351 g_loc_X = 0;
352 g_loc_Y++;
353 cline = 1;
354 }
355#ifndef NO_SCROLL
356 while (g_loc_Y >= g_max_loc_Y) {
357 scrollscreen();
358 g_loc_Y--;
359 }
360#else
361 /* wrap around from bottom to top of screen so we don't
362 waste time scrolling each line. -- paulus. */
363 if (g_loc_Y >= g_max_loc_Y)
364 g_loc_Y = 0;
365 if (cline) {
366 for (x = 0; x < g_max_loc_X; ++x)
367 draw_byte(' ', x, g_loc_Y);
368 }
369#endif
370}
371
372void btext_drawstring(const char *c)
373{
374 if (!boot_text_mapped)
375 return;
376 while (*c)
377 btext_drawchar(*c++);
378}
379
380void btext_drawhex(unsigned long v)
381{
382 char *hex_table = "0123456789abcdef";
383
384 if (!boot_text_mapped)
385 return;
386#ifdef CONFIG_PPC64
387 btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
388 btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
389 btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
390 btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
391 btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
392 btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
393 btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
394 btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
395#endif
396 btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
397 btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
398 btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
399 btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
400 btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
401 btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
402 btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
403 btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
404 btext_drawchar(' ');
405}
406
407static void draw_byte(unsigned char c, long locX, long locY)
408{
409 unsigned char *base = calc_base(locX << 3, locY << 4);
410 unsigned char *font = &vga_font[((unsigned int)c) * 16];
411 int rb = dispDeviceRowBytes;
412
413 switch(dispDeviceDepth) {
414 case 24:
415 case 32:
416 draw_byte_32(font, (unsigned int *)base, rb);
417 break;
418 case 15:
419 case 16:
420 draw_byte_16(font, (unsigned int *)base, rb);
421 break;
422 case 8:
423 draw_byte_8(font, (unsigned int *)base, rb);
424 break;
425 }
426}
427
428static unsigned int expand_bits_8[16] = {
429 0x00000000,
430 0x000000ff,
431 0x0000ff00,
432 0x0000ffff,
433 0x00ff0000,
434 0x00ff00ff,
435 0x00ffff00,
436 0x00ffffff,
437 0xff000000,
438 0xff0000ff,
439 0xff00ff00,
440 0xff00ffff,
441 0xffff0000,
442 0xffff00ff,
443 0xffffff00,
444 0xffffffff
445};
446
447static unsigned int expand_bits_16[4] = {
448 0x00000000,
449 0x0000ffff,
450 0xffff0000,
451 0xffffffff
452};
453
454
455static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
456{
457 int l, bits;
458 int fg = 0xFFFFFFFFUL;
459 int bg = 0x00000000UL;
460
461 for (l = 0; l < 16; ++l)
462 {
463 bits = *font++;
464 base[0] = (-(bits >> 7) & fg) ^ bg;
465 base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
466 base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
467 base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
468 base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
469 base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
470 base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
471 base[7] = (-(bits & 1) & fg) ^ bg;
472 base = (unsigned int *) ((char *)base + rb);
473 }
474}
475
476static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
477{
478 int l, bits;
479 int fg = 0xFFFFFFFFUL;
480 int bg = 0x00000000UL;
481 unsigned int *eb = (int *)expand_bits_16;
482
483 for (l = 0; l < 16; ++l)
484 {
485 bits = *font++;
486 base[0] = (eb[bits >> 6] & fg) ^ bg;
487 base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
488 base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
489 base[3] = (eb[bits & 3] & fg) ^ bg;
490 base = (unsigned int *) ((char *)base + rb);
491 }
492}
493
494static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
495{
496 int l, bits;
497 int fg = 0x0F0F0F0FUL;
498 int bg = 0x00000000UL;
499 unsigned int *eb = (int *)expand_bits_8;
500
501 for (l = 0; l < 16; ++l)
502 {
503 bits = *font++;
504 base[0] = (eb[bits >> 4] & fg) ^ bg;
505 base[1] = (eb[bits & 0xf] & fg) ^ bg;
506 base = (unsigned int *) ((char *)base + rb);
507 }
508}
509
510static unsigned char vga_font[cmapsz] = {
5110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
5130x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
5140xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
5150x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
5160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
5170x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
5180x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
5190x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
5200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5210x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
5220xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
5230x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
5240x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
5250xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
5260x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
5270x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
5280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
5290x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
5300x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
5310x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
5320x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
5330xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
5340x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
5350x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
5360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
5370x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
5380xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
5390x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
5400x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5410xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
5420x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
5430x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
5450x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5460x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5470x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
5480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
5490xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5500x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5510x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
5520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
5530x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5540x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5550x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5560x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
5570x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
5580x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
5590x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
5600x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
5610x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
5620x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
5630x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
5650x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
5660x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5670x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
5680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
5690x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5700x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
5710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
5720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5730x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5740x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
5750x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
5760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
5770x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5780x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
5790x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
5800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
5810x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
5820xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5830x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
5840x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
5850x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
5860xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
5870x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
5880x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
5890x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
5900x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
5910x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
5920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
5930x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
5940x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
5950x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
5960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
5970xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
5980x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
5990x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
6000x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
6010xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
6020x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
6030x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
6040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
6050x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6060xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
6070x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
6090x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
6100x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
6110x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
6120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
6130x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
6140xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
6150x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
6160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
6180x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
6190x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
6200x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
6210x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
6220xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6230x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
6250xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6260xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
6270x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
6280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
6290x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
6300xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6310x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
6320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
6330x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
6340xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
6350x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
6360x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
6370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6380x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
6390x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
6410xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
6420x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
6430x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
6440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
6450xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6460x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6470x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
6480x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
6490xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
6500x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6510x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
6530x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
6540x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
6550x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
6560x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
6570xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6580x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
6590x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
6600x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
6610x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
6620x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
6630x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
6640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
6650x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
6660x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
6670x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
6690xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6700x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
6710x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
6720x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
6730xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
6740x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
6750x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
6760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
6770x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
6780x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
6790x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
6800x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
6810xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
6820xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
6830x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
6840x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
6850xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
6860x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6870x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
6880x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
6890xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
6900x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
6910x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
6920x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
6930xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
6940x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
6950x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
6960x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
6970x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
6980x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
6990x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
7000x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
7010xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
7020x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
7030x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
7040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
7050x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
7060xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
7070x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7080x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
7090xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
7100x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
7110x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
7120x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
7130xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
7140x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
7150x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7160x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
7170xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
7180xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
7190x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
7200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
7210xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
7220x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
7230x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
7240xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
7250xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
7260x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
7270x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
7280x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
7290xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
7300x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
7310x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
7320x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
7330x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
7340x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7350x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
7360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
7370xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7380x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
7390x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
7400x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
7410x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
7420x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
7430x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
7440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
7450x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
7460x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
7470x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
7480x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
7490xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
7500x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7510x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
7520x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7530x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7540x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
7560x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
7570x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
7580x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7590x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7600x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
7610x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7620x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7630x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
7640x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
7650x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7660x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7670x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
7680x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
7690x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7700x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7710x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7720x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
7730x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7740x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7750x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7760x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
7770x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7780x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7790x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7800x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
7810x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7820x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7830x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
7840x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
7850x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
7860x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7870x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
7880x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
7890x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7900x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
7910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
7920x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
7930x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
7940x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
7950x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
7960x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
7970x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7980x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
7990x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
8000x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
8010x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8020x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8030xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
8040xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
8050xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
8060xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
8070x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
8080x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
8090x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8100x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
8110x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
8120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
8130xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8140xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
8150x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
8160x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
8170xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8180x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
8190x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8200x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
8210x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
8220x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
8230x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
8240x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
8250x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8260x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8270x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
8280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
8290x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
8300xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
8310x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
8320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
8330x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
8340x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
8350x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
8360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
8370x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
8380x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
8390x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
8400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
8410x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
8420x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
8440x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8450x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
8460x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
8470x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
8480x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
8490x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8500x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
8510x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8520x00, 0x00, 0x00, 0x00,
853};
diff --git a/arch/ppc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6b76cf58d9e0..b91345fa0805 100644
--- a/arch/ppc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * arch/ppc/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) 2 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 * 3 *
4 * Modifications for ppc64:
5 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
6 *
6 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
@@ -14,96 +15,302 @@
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/threads.h> 16#include <linux/threads.h>
16#include <linux/init.h> 17#include <linux/init.h>
17#include <asm/cputable.h> 18#include <linux/module.h>
18 19
19struct cpu_spec* cur_cpu_spec[NR_CPUS]; 20#include <asm/oprofile_impl.h>
21#include <asm/cputable.h>
20 22
21extern void __setup_cpu_601(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 23struct cpu_spec* cur_cpu_spec = NULL;
22extern void __setup_cpu_603(unsigned long offset, int cpu_nr, struct cpu_spec* spec); 24EXPORT_SYMBOL(cur_cpu_spec);
23extern void __setup_cpu_604(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
24extern void __setup_cpu_750(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
25extern void __setup_cpu_750cx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
26extern void __setup_cpu_750fx(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
27extern void __setup_cpu_7400(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
28extern void __setup_cpu_7410(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
29extern void __setup_cpu_745x(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
30extern void __setup_cpu_power3(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
31extern void __setup_cpu_power4(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
32extern void __setup_cpu_ppc970(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
33extern void __setup_cpu_generic(unsigned long offset, int cpu_nr, struct cpu_spec* spec);
34 25
35#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \ 26/* NOTE:
36 !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \ 27 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
37 !defined(CONFIG_BOOKE)) 28 * the responsibility of the appropriate CPU save/restore functions to
29 * eventually copy these settings over. Those save/restore aren't yet
30 * part of the cputable though. That has to be fixed for both ppc32
31 * and ppc64
32 */
33#ifdef CONFIG_PPC64
34extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
35extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
37#else
38extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
39extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
40extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
41extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
42extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
43extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
44extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
45extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
46#endif /* CONFIG_PPC32 */
47extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38 48
39/* This table only contains "desktop" CPUs, it need to be filled with embedded 49/* This table only contains "desktop" CPUs, it need to be filled with embedded
40 * ones as well... 50 * ones as well...
41 */ 51 */
42#define COMMON_PPC (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \ 52#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
43 PPC_FEATURE_HAS_MMU) 53 PPC_FEATURE_HAS_MMU)
54#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
44 55
45/* We only set the altivec features if the kernel was compiled with altivec
46 * support
47 */
48#ifdef CONFIG_ALTIVEC
49#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
50#define PPC_FEATURE_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
51#else
52#define CPU_FTR_ALTIVEC_COMP 0
53#define PPC_FEATURE_ALTIVEC_COMP 0
54#endif
55 56
56/* We only set the spe features if the kernel was compiled with 57/* We only set the spe features if the kernel was compiled with
57 * spe support 58 * spe support
58 */ 59 */
59#ifdef CONFIG_SPE 60#ifdef CONFIG_SPE
60#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE 61#define PPC_FEATURE_SPE_COMP PPC_FEATURE_HAS_SPE
61#else 62#else
62#define PPC_FEATURE_SPE_COMP 0 63#define PPC_FEATURE_SPE_COMP 0
63#endif 64#endif
64 65
65/* We need to mark all pages as being coherent if we're SMP or we 66struct cpu_spec cpu_specs[] = {
66 * have a 74[45]x and an MPC107 host bridge. 67#ifdef CONFIG_PPC64
67 */ 68 { /* Power3 */
68#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) 69 .pvr_mask = 0xffff0000,
69#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT 70 .pvr_value = 0x00400000,
70#else 71 .cpu_name = "POWER3 (630)",
71#define CPU_FTR_COMMON 0 72 .cpu_features = CPU_FTRS_POWER3,
73 .cpu_user_features = COMMON_USER_PPC64,
74 .icache_bsize = 128,
75 .dcache_bsize = 128,
76 .num_pmcs = 8,
77 .cpu_setup = __setup_cpu_power3,
78#ifdef CONFIG_OPROFILE
79 .oprofile_cpu_type = "ppc64/power3",
80 .oprofile_model = &op_model_rs64,
72#endif 81#endif
73 82 },
74/* The powersave features NAP & DOZE seems to confuse BDI when 83 { /* Power3+ */
75 debugging. So if a BDI is used, disable theses 84 .pvr_mask = 0xffff0000,
76 */ 85 .pvr_value = 0x00410000,
77#ifndef CONFIG_BDI_SWITCH 86 .cpu_name = "POWER3 (630+)",
78#define CPU_FTR_MAYBE_CAN_DOZE CPU_FTR_CAN_DOZE 87 .cpu_features = CPU_FTRS_POWER3,
79#define CPU_FTR_MAYBE_CAN_NAP CPU_FTR_CAN_NAP 88 .cpu_user_features = COMMON_USER_PPC64,
89 .icache_bsize = 128,
90 .dcache_bsize = 128,
91 .num_pmcs = 8,
92 .cpu_setup = __setup_cpu_power3,
93#ifdef CONFIG_OPROFILE
94 .oprofile_cpu_type = "ppc64/power3",
95 .oprofile_model = &op_model_rs64,
96#endif
97 },
98 { /* Northstar */
99 .pvr_mask = 0xffff0000,
100 .pvr_value = 0x00330000,
101 .cpu_name = "RS64-II (northstar)",
102 .cpu_features = CPU_FTRS_RS64,
103 .cpu_user_features = COMMON_USER_PPC64,
104 .icache_bsize = 128,
105 .dcache_bsize = 128,
106 .num_pmcs = 8,
107 .cpu_setup = __setup_cpu_power3,
108#ifdef CONFIG_OPROFILE
109 .oprofile_cpu_type = "ppc64/rs64",
110 .oprofile_model = &op_model_rs64,
111#endif
112 },
113 { /* Pulsar */
114 .pvr_mask = 0xffff0000,
115 .pvr_value = 0x00340000,
116 .cpu_name = "RS64-III (pulsar)",
117 .cpu_features = CPU_FTRS_RS64,
118 .cpu_user_features = COMMON_USER_PPC64,
119 .icache_bsize = 128,
120 .dcache_bsize = 128,
121 .num_pmcs = 8,
122 .cpu_setup = __setup_cpu_power3,
123#ifdef CONFIG_OPROFILE
124 .oprofile_cpu_type = "ppc64/rs64",
125 .oprofile_model = &op_model_rs64,
126#endif
127 },
128 { /* I-star */
129 .pvr_mask = 0xffff0000,
130 .pvr_value = 0x00360000,
131 .cpu_name = "RS64-III (icestar)",
132 .cpu_features = CPU_FTRS_RS64,
133 .cpu_user_features = COMMON_USER_PPC64,
134 .icache_bsize = 128,
135 .dcache_bsize = 128,
136 .num_pmcs = 8,
137 .cpu_setup = __setup_cpu_power3,
138#ifdef CONFIG_OPROFILE
139 .oprofile_cpu_type = "ppc64/rs64",
140 .oprofile_model = &op_model_rs64,
141#endif
142 },
143 { /* S-star */
144 .pvr_mask = 0xffff0000,
145 .pvr_value = 0x00370000,
146 .cpu_name = "RS64-IV (sstar)",
147 .cpu_features = CPU_FTRS_RS64,
148 .cpu_user_features = COMMON_USER_PPC64,
149 .icache_bsize = 128,
150 .dcache_bsize = 128,
151 .num_pmcs = 8,
152 .cpu_setup = __setup_cpu_power3,
153#ifdef CONFIG_OPROFILE
154 .oprofile_cpu_type = "ppc64/rs64",
155 .oprofile_model = &op_model_rs64,
156#endif
157 },
158 { /* Power4 */
159 .pvr_mask = 0xffff0000,
160 .pvr_value = 0x00350000,
161 .cpu_name = "POWER4 (gp)",
162 .cpu_features = CPU_FTRS_POWER4,
163 .cpu_user_features = COMMON_USER_PPC64,
164 .icache_bsize = 128,
165 .dcache_bsize = 128,
166 .num_pmcs = 8,
167 .cpu_setup = __setup_cpu_power4,
168#ifdef CONFIG_OPROFILE
169 .oprofile_cpu_type = "ppc64/power4",
170 .oprofile_model = &op_model_rs64,
171#endif
172 },
173 { /* Power4+ */
174 .pvr_mask = 0xffff0000,
175 .pvr_value = 0x00380000,
176 .cpu_name = "POWER4+ (gq)",
177 .cpu_features = CPU_FTRS_POWER4,
178 .cpu_user_features = COMMON_USER_PPC64,
179 .icache_bsize = 128,
180 .dcache_bsize = 128,
181 .num_pmcs = 8,
182 .cpu_setup = __setup_cpu_power4,
183#ifdef CONFIG_OPROFILE
184 .oprofile_cpu_type = "ppc64/power4",
185 .oprofile_model = &op_model_power4,
186#endif
187 },
188 { /* PPC970 */
189 .pvr_mask = 0xffff0000,
190 .pvr_value = 0x00390000,
191 .cpu_name = "PPC970",
192 .cpu_features = CPU_FTRS_PPC970,
193 .cpu_user_features = COMMON_USER_PPC64 |
194 PPC_FEATURE_HAS_ALTIVEC_COMP,
195 .icache_bsize = 128,
196 .dcache_bsize = 128,
197 .num_pmcs = 8,
198 .cpu_setup = __setup_cpu_ppc970,
199#ifdef CONFIG_OPROFILE
200 .oprofile_cpu_type = "ppc64/970",
201 .oprofile_model = &op_model_power4,
202#endif
203 },
204#endif /* CONFIG_PPC64 */
205#if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210#ifdef CONFIG_PPC32
211 .cpu_features = CPU_FTRS_970_32,
80#else 212#else
81#define CPU_FTR_MAYBE_CAN_DOZE 0 213 .cpu_features = CPU_FTRS_PPC970,
82#define CPU_FTR_MAYBE_CAN_NAP 0
83#endif 214#endif
84 215 .cpu_user_features = COMMON_USER_PPC64 |
85struct cpu_spec cpu_specs[] = { 216 PPC_FEATURE_HAS_ALTIVEC_COMP,
217 .icache_bsize = 128,
218 .dcache_bsize = 128,
219 .num_pmcs = 8,
220 .cpu_setup = __setup_cpu_ppc970,
221#ifdef CONFIG_OPROFILE
222 .oprofile_cpu_type = "ppc64/970",
223 .oprofile_model = &op_model_power4,
224#endif
225 },
226#endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
227#ifdef CONFIG_PPC64
228 { /* PPC970MP */
229 .pvr_mask = 0xffff0000,
230 .pvr_value = 0x00440000,
231 .cpu_name = "PPC970MP",
232 .cpu_features = CPU_FTRS_PPC970,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTRS_POWER5,
248 .cpu_user_features = COMMON_USER_PPC64,
249 .icache_bsize = 128,
250 .dcache_bsize = 128,
251 .num_pmcs = 6,
252 .cpu_setup = __setup_cpu_power4,
253#ifdef CONFIG_OPROFILE
254 .oprofile_cpu_type = "ppc64/power5",
255 .oprofile_model = &op_model_power4,
256#endif
257 },
258 { /* Power5 */
259 .pvr_mask = 0xffff0000,
260 .pvr_value = 0x003b0000,
261 .cpu_name = "POWER5 (gs)",
262 .cpu_features = CPU_FTRS_POWER5,
263 .cpu_user_features = COMMON_USER_PPC64,
264 .icache_bsize = 128,
265 .dcache_bsize = 128,
266 .num_pmcs = 6,
267 .cpu_setup = __setup_cpu_power4,
268#ifdef CONFIG_OPROFILE
269 .oprofile_cpu_type = "ppc64/power5",
270 .oprofile_model = &op_model_power4,
271#endif
272 },
273 { /* BE DD1.x */
274 .pvr_mask = 0xffff0000,
275 .pvr_value = 0x00700000,
276 .cpu_name = "Cell Broadband Engine",
277 .cpu_features = CPU_FTRS_CELL,
278 .cpu_user_features = COMMON_USER_PPC64 |
279 PPC_FEATURE_HAS_ALTIVEC_COMP,
280 .icache_bsize = 128,
281 .dcache_bsize = 128,
282 .cpu_setup = __setup_cpu_be,
283 },
284 { /* default match */
285 .pvr_mask = 0x00000000,
286 .pvr_value = 0x00000000,
287 .cpu_name = "POWER4 (compatible)",
288 .cpu_features = CPU_FTRS_COMPATIBLE,
289 .cpu_user_features = COMMON_USER_PPC64,
290 .icache_bsize = 128,
291 .dcache_bsize = 128,
292 .num_pmcs = 6,
293 .cpu_setup = __setup_cpu_power4,
294 }
295#endif /* CONFIG_PPC64 */
296#ifdef CONFIG_PPC32
86#if CLASSIC_PPC 297#if CLASSIC_PPC
87 { /* 601 */ 298 { /* 601 */
88 .pvr_mask = 0xffff0000, 299 .pvr_mask = 0xffff0000,
89 .pvr_value = 0x00010000, 300 .pvr_value = 0x00010000,
90 .cpu_name = "601", 301 .cpu_name = "601",
91 .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 | 302 .cpu_features = CPU_FTRS_PPC601,
92 CPU_FTR_HPTE_TABLE, 303 .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
93 .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR |
94 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, 304 PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
95 .icache_bsize = 32, 305 .icache_bsize = 32,
96 .dcache_bsize = 32, 306 .dcache_bsize = 32,
97 .cpu_setup = __setup_cpu_601
98 }, 307 },
99 { /* 603 */ 308 { /* 603 */
100 .pvr_mask = 0xffff0000, 309 .pvr_mask = 0xffff0000,
101 .pvr_value = 0x00030000, 310 .pvr_value = 0x00030000,
102 .cpu_name = "603", 311 .cpu_name = "603",
103 .cpu_features = CPU_FTR_COMMON | 312 .cpu_features = CPU_FTRS_603,
104 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 313 .cpu_user_features = COMMON_USER,
105 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
106 .cpu_user_features = COMMON_PPC,
107 .icache_bsize = 32, 314 .icache_bsize = 32,
108 .dcache_bsize = 32, 315 .dcache_bsize = 32,
109 .cpu_setup = __setup_cpu_603 316 .cpu_setup = __setup_cpu_603
@@ -112,10 +319,8 @@ struct cpu_spec cpu_specs[] = {
112 .pvr_mask = 0xffff0000, 319 .pvr_mask = 0xffff0000,
113 .pvr_value = 0x00060000, 320 .pvr_value = 0x00060000,
114 .cpu_name = "603e", 321 .cpu_name = "603e",
115 .cpu_features = CPU_FTR_COMMON | 322 .cpu_features = CPU_FTRS_603,
116 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 323 .cpu_user_features = COMMON_USER,
117 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
118 .cpu_user_features = COMMON_PPC,
119 .icache_bsize = 32, 324 .icache_bsize = 32,
120 .dcache_bsize = 32, 325 .dcache_bsize = 32,
121 .cpu_setup = __setup_cpu_603 326 .cpu_setup = __setup_cpu_603
@@ -124,10 +329,8 @@ struct cpu_spec cpu_specs[] = {
124 .pvr_mask = 0xffff0000, 329 .pvr_mask = 0xffff0000,
125 .pvr_value = 0x00070000, 330 .pvr_value = 0x00070000,
126 .cpu_name = "603ev", 331 .cpu_name = "603ev",
127 .cpu_features = CPU_FTR_COMMON | 332 .cpu_features = CPU_FTRS_603,
128 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 333 .cpu_user_features = COMMON_USER,
129 CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP,
130 .cpu_user_features = COMMON_PPC,
131 .icache_bsize = 32, 334 .icache_bsize = 32,
132 .dcache_bsize = 32, 335 .dcache_bsize = 32,
133 .cpu_setup = __setup_cpu_603 336 .cpu_setup = __setup_cpu_603
@@ -136,10 +339,8 @@ struct cpu_spec cpu_specs[] = {
136 .pvr_mask = 0xffff0000, 339 .pvr_mask = 0xffff0000,
137 .pvr_value = 0x00040000, 340 .pvr_value = 0x00040000,
138 .cpu_name = "604", 341 .cpu_name = "604",
139 .cpu_features = CPU_FTR_COMMON | 342 .cpu_features = CPU_FTRS_604,
140 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 343 .cpu_user_features = COMMON_USER,
141 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
142 .cpu_user_features = COMMON_PPC,
143 .icache_bsize = 32, 344 .icache_bsize = 32,
144 .dcache_bsize = 32, 345 .dcache_bsize = 32,
145 .num_pmcs = 2, 346 .num_pmcs = 2,
@@ -149,10 +350,8 @@ struct cpu_spec cpu_specs[] = {
149 .pvr_mask = 0xfffff000, 350 .pvr_mask = 0xfffff000,
150 .pvr_value = 0x00090000, 351 .pvr_value = 0x00090000,
151 .cpu_name = "604e", 352 .cpu_name = "604e",
152 .cpu_features = CPU_FTR_COMMON | 353 .cpu_features = CPU_FTRS_604,
153 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 354 .cpu_user_features = COMMON_USER,
154 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
155 .cpu_user_features = COMMON_PPC,
156 .icache_bsize = 32, 355 .icache_bsize = 32,
157 .dcache_bsize = 32, 356 .dcache_bsize = 32,
158 .num_pmcs = 4, 357 .num_pmcs = 4,
@@ -162,10 +361,8 @@ struct cpu_spec cpu_specs[] = {
162 .pvr_mask = 0xffff0000, 361 .pvr_mask = 0xffff0000,
163 .pvr_value = 0x00090000, 362 .pvr_value = 0x00090000,
164 .cpu_name = "604r", 363 .cpu_name = "604r",
165 .cpu_features = CPU_FTR_COMMON | 364 .cpu_features = CPU_FTRS_604,
166 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 365 .cpu_user_features = COMMON_USER,
167 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
168 .cpu_user_features = COMMON_PPC,
169 .icache_bsize = 32, 366 .icache_bsize = 32,
170 .dcache_bsize = 32, 367 .dcache_bsize = 32,
171 .num_pmcs = 4, 368 .num_pmcs = 4,
@@ -175,10 +372,8 @@ struct cpu_spec cpu_specs[] = {
175 .pvr_mask = 0xffff0000, 372 .pvr_mask = 0xffff0000,
176 .pvr_value = 0x000a0000, 373 .pvr_value = 0x000a0000,
177 .cpu_name = "604ev", 374 .cpu_name = "604ev",
178 .cpu_features = CPU_FTR_COMMON | 375 .cpu_features = CPU_FTRS_604,
179 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 376 .cpu_user_features = COMMON_USER,
180 CPU_FTR_604_PERF_MON | CPU_FTR_HPTE_TABLE,
181 .cpu_user_features = COMMON_PPC,
182 .icache_bsize = 32, 377 .icache_bsize = 32,
183 .dcache_bsize = 32, 378 .dcache_bsize = 32,
184 .num_pmcs = 4, 379 .num_pmcs = 4,
@@ -188,11 +383,8 @@ struct cpu_spec cpu_specs[] = {
188 .pvr_mask = 0xffffffff, 383 .pvr_mask = 0xffffffff,
189 .pvr_value = 0x00084202, 384 .pvr_value = 0x00084202,
190 .cpu_name = "740/750", 385 .cpu_name = "740/750",
191 .cpu_features = CPU_FTR_COMMON | 386 .cpu_features = CPU_FTRS_740_NOTAU,
192 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 387 .cpu_user_features = COMMON_USER,
193 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_HPTE_TABLE |
194 CPU_FTR_MAYBE_CAN_NAP,
195 .cpu_user_features = COMMON_PPC,
196 .icache_bsize = 32, 388 .icache_bsize = 32,
197 .dcache_bsize = 32, 389 .dcache_bsize = 32,
198 .num_pmcs = 4, 390 .num_pmcs = 4,
@@ -202,11 +394,8 @@ struct cpu_spec cpu_specs[] = {
202 .pvr_mask = 0xfffffff0, 394 .pvr_mask = 0xfffffff0,
203 .pvr_value = 0x00080100, 395 .pvr_value = 0x00080100,
204 .cpu_name = "750CX", 396 .cpu_name = "750CX",
205 .cpu_features = CPU_FTR_COMMON | 397 .cpu_features = CPU_FTRS_750,
206 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 398 .cpu_user_features = COMMON_USER,
207 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
208 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
209 .cpu_user_features = COMMON_PPC,
210 .icache_bsize = 32, 399 .icache_bsize = 32,
211 .dcache_bsize = 32, 400 .dcache_bsize = 32,
212 .num_pmcs = 4, 401 .num_pmcs = 4,
@@ -216,11 +405,8 @@ struct cpu_spec cpu_specs[] = {
216 .pvr_mask = 0xfffffff0, 405 .pvr_mask = 0xfffffff0,
217 .pvr_value = 0x00082200, 406 .pvr_value = 0x00082200,
218 .cpu_name = "750CX", 407 .cpu_name = "750CX",
219 .cpu_features = CPU_FTR_COMMON | 408 .cpu_features = CPU_FTRS_750,
220 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 409 .cpu_user_features = COMMON_USER,
221 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
222 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
223 .cpu_user_features = COMMON_PPC,
224 .icache_bsize = 32, 410 .icache_bsize = 32,
225 .dcache_bsize = 32, 411 .dcache_bsize = 32,
226 .num_pmcs = 4, 412 .num_pmcs = 4,
@@ -230,11 +416,8 @@ struct cpu_spec cpu_specs[] = {
230 .pvr_mask = 0xfffffff0, 416 .pvr_mask = 0xfffffff0,
231 .pvr_value = 0x00082210, 417 .pvr_value = 0x00082210,
232 .cpu_name = "750CXe", 418 .cpu_name = "750CXe",
233 .cpu_features = CPU_FTR_COMMON | 419 .cpu_features = CPU_FTRS_750,
234 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 420 .cpu_user_features = COMMON_USER,
235 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
236 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
237 .cpu_user_features = COMMON_PPC,
238 .icache_bsize = 32, 421 .icache_bsize = 32,
239 .dcache_bsize = 32, 422 .dcache_bsize = 32,
240 .num_pmcs = 4, 423 .num_pmcs = 4,
@@ -244,11 +427,8 @@ struct cpu_spec cpu_specs[] = {
244 .pvr_mask = 0xffffffff, 427 .pvr_mask = 0xffffffff,
245 .pvr_value = 0x00083214, 428 .pvr_value = 0x00083214,
246 .cpu_name = "750CXe", 429 .cpu_name = "750CXe",
247 .cpu_features = CPU_FTR_COMMON | 430 .cpu_features = CPU_FTRS_750,
248 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 431 .cpu_user_features = COMMON_USER,
249 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
250 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
251 .cpu_user_features = COMMON_PPC,
252 .icache_bsize = 32, 432 .icache_bsize = 32,
253 .dcache_bsize = 32, 433 .dcache_bsize = 32,
254 .num_pmcs = 4, 434 .num_pmcs = 4,
@@ -258,11 +438,8 @@ struct cpu_spec cpu_specs[] = {
258 .pvr_mask = 0xfffff000, 438 .pvr_mask = 0xfffff000,
259 .pvr_value = 0x00083000, 439 .pvr_value = 0x00083000,
260 .cpu_name = "745/755", 440 .cpu_name = "745/755",
261 .cpu_features = CPU_FTR_COMMON | 441 .cpu_features = CPU_FTRS_750,
262 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 442 .cpu_user_features = COMMON_USER,
263 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
264 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
265 .cpu_user_features = COMMON_PPC,
266 .icache_bsize = 32, 443 .icache_bsize = 32,
267 .dcache_bsize = 32, 444 .dcache_bsize = 32,
268 .num_pmcs = 4, 445 .num_pmcs = 4,
@@ -272,12 +449,8 @@ struct cpu_spec cpu_specs[] = {
272 .pvr_mask = 0xffffff00, 449 .pvr_mask = 0xffffff00,
273 .pvr_value = 0x70000100, 450 .pvr_value = 0x70000100,
274 .cpu_name = "750FX", 451 .cpu_name = "750FX",
275 .cpu_features = CPU_FTR_COMMON | 452 .cpu_features = CPU_FTRS_750FX1,
276 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 453 .cpu_user_features = COMMON_USER,
277 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
278 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
279 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM,
280 .cpu_user_features = COMMON_PPC,
281 .icache_bsize = 32, 454 .icache_bsize = 32,
282 .dcache_bsize = 32, 455 .dcache_bsize = 32,
283 .num_pmcs = 4, 456 .num_pmcs = 4,
@@ -287,12 +460,8 @@ struct cpu_spec cpu_specs[] = {
287 .pvr_mask = 0xffffffff, 460 .pvr_mask = 0xffffffff,
288 .pvr_value = 0x70000200, 461 .pvr_value = 0x70000200,
289 .cpu_name = "750FX", 462 .cpu_name = "750FX",
290 .cpu_features = CPU_FTR_COMMON | 463 .cpu_features = CPU_FTRS_750FX2,
291 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 464 .cpu_user_features = COMMON_USER,
292 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
293 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
294 CPU_FTR_NO_DPM,
295 .cpu_user_features = COMMON_PPC,
296 .icache_bsize = 32, 465 .icache_bsize = 32,
297 .dcache_bsize = 32, 466 .dcache_bsize = 32,
298 .num_pmcs = 4, 467 .num_pmcs = 4,
@@ -302,12 +471,8 @@ struct cpu_spec cpu_specs[] = {
302 .pvr_mask = 0xffff0000, 471 .pvr_mask = 0xffff0000,
303 .pvr_value = 0x70000000, 472 .pvr_value = 0x70000000,
304 .cpu_name = "750FX", 473 .cpu_name = "750FX",
305 .cpu_features = CPU_FTR_COMMON | 474 .cpu_features = CPU_FTRS_750FX,
306 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 475 .cpu_user_features = COMMON_USER,
307 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
308 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP |
309 CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS,
310 .cpu_user_features = COMMON_PPC,
311 .icache_bsize = 32, 476 .icache_bsize = 32,
312 .dcache_bsize = 32, 477 .dcache_bsize = 32,
313 .num_pmcs = 4, 478 .num_pmcs = 4,
@@ -317,12 +482,8 @@ struct cpu_spec cpu_specs[] = {
317 .pvr_mask = 0xffff0000, 482 .pvr_mask = 0xffff0000,
318 .pvr_value = 0x70020000, 483 .pvr_value = 0x70020000,
319 .cpu_name = "750GX", 484 .cpu_name = "750GX",
320 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 485 .cpu_features = CPU_FTRS_750GX,
321 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 486 .cpu_user_features = COMMON_USER,
322 CPU_FTR_L2CR | CPU_FTR_TAU | CPU_FTR_HPTE_TABLE |
323 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_DUAL_PLL_750FX |
324 CPU_FTR_HAS_HIGH_BATS,
325 .cpu_user_features = COMMON_PPC,
326 .icache_bsize = 32, 487 .icache_bsize = 32,
327 .dcache_bsize = 32, 488 .dcache_bsize = 32,
328 .num_pmcs = 4, 489 .num_pmcs = 4,
@@ -332,11 +493,8 @@ struct cpu_spec cpu_specs[] = {
332 .pvr_mask = 0xffff0000, 493 .pvr_mask = 0xffff0000,
333 .pvr_value = 0x00080000, 494 .pvr_value = 0x00080000,
334 .cpu_name = "740/750", 495 .cpu_name = "740/750",
335 .cpu_features = CPU_FTR_COMMON | 496 .cpu_features = CPU_FTRS_740,
336 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 497 .cpu_user_features = COMMON_USER,
337 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
338 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
339 .cpu_user_features = COMMON_PPC,
340 .icache_bsize = 32, 498 .icache_bsize = 32,
341 .dcache_bsize = 32, 499 .dcache_bsize = 32,
342 .num_pmcs = 4, 500 .num_pmcs = 4,
@@ -346,11 +504,8 @@ struct cpu_spec cpu_specs[] = {
346 .pvr_mask = 0xffffffff, 504 .pvr_mask = 0xffffffff,
347 .pvr_value = 0x000c1101, 505 .pvr_value = 0x000c1101,
348 .cpu_name = "7400 (1.1)", 506 .cpu_name = "7400 (1.1)",
349 .cpu_features = CPU_FTR_COMMON | 507 .cpu_features = CPU_FTRS_7400_NOTAU,
350 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 508 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
351 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
352 CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP,
353 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
354 .icache_bsize = 32, 509 .icache_bsize = 32,
355 .dcache_bsize = 32, 510 .dcache_bsize = 32,
356 .num_pmcs = 4, 511 .num_pmcs = 4,
@@ -360,12 +515,8 @@ struct cpu_spec cpu_specs[] = {
360 .pvr_mask = 0xffff0000, 515 .pvr_mask = 0xffff0000,
361 .pvr_value = 0x000c0000, 516 .pvr_value = 0x000c0000,
362 .cpu_name = "7400", 517 .cpu_name = "7400",
363 .cpu_features = CPU_FTR_COMMON | 518 .cpu_features = CPU_FTRS_7400,
364 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 519 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
365 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
366 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
367 CPU_FTR_MAYBE_CAN_NAP,
368 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
369 .icache_bsize = 32, 520 .icache_bsize = 32,
370 .dcache_bsize = 32, 521 .dcache_bsize = 32,
371 .num_pmcs = 4, 522 .num_pmcs = 4,
@@ -375,12 +526,8 @@ struct cpu_spec cpu_specs[] = {
375 .pvr_mask = 0xffff0000, 526 .pvr_mask = 0xffff0000,
376 .pvr_value = 0x800c0000, 527 .pvr_value = 0x800c0000,
377 .cpu_name = "7410", 528 .cpu_name = "7410",
378 .cpu_features = CPU_FTR_COMMON | 529 .cpu_features = CPU_FTRS_7400,
379 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 530 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
380 CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU |
381 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
382 CPU_FTR_MAYBE_CAN_NAP,
383 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
384 .icache_bsize = 32, 531 .icache_bsize = 32,
385 .dcache_bsize = 32, 532 .dcache_bsize = 32,
386 .num_pmcs = 4, 533 .num_pmcs = 4,
@@ -390,12 +537,8 @@ struct cpu_spec cpu_specs[] = {
390 .pvr_mask = 0xffffffff, 537 .pvr_mask = 0xffffffff,
391 .pvr_value = 0x80000200, 538 .pvr_value = 0x80000200,
392 .cpu_name = "7450", 539 .cpu_name = "7450",
393 .cpu_features = CPU_FTR_COMMON | 540 .cpu_features = CPU_FTRS_7450_20,
394 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 541 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
395 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
396 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
397 CPU_FTR_NEED_COHERENT,
398 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
399 .icache_bsize = 32, 542 .icache_bsize = 32,
400 .dcache_bsize = 32, 543 .dcache_bsize = 32,
401 .num_pmcs = 6, 544 .num_pmcs = 6,
@@ -405,14 +548,8 @@ struct cpu_spec cpu_specs[] = {
405 .pvr_mask = 0xffffffff, 548 .pvr_mask = 0xffffffff,
406 .pvr_value = 0x80000201, 549 .pvr_value = 0x80000201,
407 .cpu_name = "7450", 550 .cpu_name = "7450",
408 .cpu_features = CPU_FTR_COMMON | 551 .cpu_features = CPU_FTRS_7450_21,
409 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 552 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
410 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
411 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
412 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
413 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
414 CPU_FTR_NEED_COHERENT,
415 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
416 .icache_bsize = 32, 553 .icache_bsize = 32,
417 .dcache_bsize = 32, 554 .dcache_bsize = 32,
418 .num_pmcs = 6, 555 .num_pmcs = 6,
@@ -422,13 +559,8 @@ struct cpu_spec cpu_specs[] = {
422 .pvr_mask = 0xffff0000, 559 .pvr_mask = 0xffff0000,
423 .pvr_value = 0x80000000, 560 .pvr_value = 0x80000000,
424 .cpu_name = "7450", 561 .cpu_name = "7450",
425 .cpu_features = CPU_FTR_COMMON | 562 .cpu_features = CPU_FTRS_7450_23,
426 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 563 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
427 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
428 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
429 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
430 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT,
431 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
432 .icache_bsize = 32, 564 .icache_bsize = 32,
433 .dcache_bsize = 32, 565 .dcache_bsize = 32,
434 .num_pmcs = 6, 566 .num_pmcs = 6,
@@ -438,12 +570,8 @@ struct cpu_spec cpu_specs[] = {
438 .pvr_mask = 0xffffff00, 570 .pvr_mask = 0xffffff00,
439 .pvr_value = 0x80010100, 571 .pvr_value = 0x80010100,
440 .cpu_name = "7455", 572 .cpu_name = "7455",
441 .cpu_features = CPU_FTR_COMMON | 573 .cpu_features = CPU_FTRS_7455_1,
442 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 574 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
443 CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
444 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
445 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
446 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
447 .icache_bsize = 32, 575 .icache_bsize = 32,
448 .dcache_bsize = 32, 576 .dcache_bsize = 32,
449 .num_pmcs = 6, 577 .num_pmcs = 6,
@@ -453,14 +581,8 @@ struct cpu_spec cpu_specs[] = {
453 .pvr_mask = 0xffffffff, 581 .pvr_mask = 0xffffffff,
454 .pvr_value = 0x80010200, 582 .pvr_value = 0x80010200,
455 .cpu_name = "7455", 583 .cpu_name = "7455",
456 .cpu_features = CPU_FTR_COMMON | 584 .cpu_features = CPU_FTRS_7455_20,
457 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 585 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
458 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
459 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
460 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
461 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP |
462 CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS,
463 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
464 .icache_bsize = 32, 586 .icache_bsize = 32,
465 .dcache_bsize = 32, 587 .dcache_bsize = 32,
466 .num_pmcs = 6, 588 .num_pmcs = 6,
@@ -470,14 +592,8 @@ struct cpu_spec cpu_specs[] = {
470 .pvr_mask = 0xffff0000, 592 .pvr_mask = 0xffff0000,
471 .pvr_value = 0x80010000, 593 .pvr_value = 0x80010000,
472 .cpu_name = "7455", 594 .cpu_name = "7455",
473 .cpu_features = CPU_FTR_COMMON | 595 .cpu_features = CPU_FTRS_7455,
474 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 596 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
475 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
476 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
477 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
478 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
479 CPU_FTR_NEED_COHERENT,
480 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
481 .icache_bsize = 32, 597 .icache_bsize = 32,
482 .dcache_bsize = 32, 598 .dcache_bsize = 32,
483 .num_pmcs = 6, 599 .num_pmcs = 6,
@@ -487,14 +603,8 @@ struct cpu_spec cpu_specs[] = {
487 .pvr_mask = 0xffffffff, 603 .pvr_mask = 0xffffffff,
488 .pvr_value = 0x80020100, 604 .pvr_value = 0x80020100,
489 .cpu_name = "7447/7457", 605 .cpu_name = "7447/7457",
490 .cpu_features = CPU_FTR_COMMON | 606 .cpu_features = CPU_FTRS_7447_10,
491 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 607 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
492 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
493 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
494 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
495 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
496 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
497 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
498 .icache_bsize = 32, 608 .icache_bsize = 32,
499 .dcache_bsize = 32, 609 .dcache_bsize = 32,
500 .num_pmcs = 6, 610 .num_pmcs = 6,
@@ -504,14 +614,8 @@ struct cpu_spec cpu_specs[] = {
504 .pvr_mask = 0xffffffff, 614 .pvr_mask = 0xffffffff,
505 .pvr_value = 0x80020101, 615 .pvr_value = 0x80020101,
506 .cpu_name = "7447/7457", 616 .cpu_name = "7447/7457",
507 .cpu_features = CPU_FTR_COMMON | 617 .cpu_features = CPU_FTRS_7447_10,
508 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 618 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
509 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
510 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
511 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
512 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
513 CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC,
514 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
515 .icache_bsize = 32, 619 .icache_bsize = 32,
516 .dcache_bsize = 32, 620 .dcache_bsize = 32,
517 .num_pmcs = 6, 621 .num_pmcs = 6,
@@ -521,14 +625,8 @@ struct cpu_spec cpu_specs[] = {
521 .pvr_mask = 0xffff0000, 625 .pvr_mask = 0xffff0000,
522 .pvr_value = 0x80020000, 626 .pvr_value = 0x80020000,
523 .cpu_name = "7447/7457", 627 .cpu_name = "7447/7457",
524 .cpu_features = CPU_FTR_COMMON | 628 .cpu_features = CPU_FTRS_7447,
525 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 629 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
526 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
527 CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR |
528 CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 |
529 CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS |
530 CPU_FTR_NEED_COHERENT,
531 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
532 .icache_bsize = 32, 630 .icache_bsize = 32,
533 .dcache_bsize = 32, 631 .dcache_bsize = 32,
534 .num_pmcs = 6, 632 .num_pmcs = 6,
@@ -538,13 +636,8 @@ struct cpu_spec cpu_specs[] = {
538 .pvr_mask = 0xffff0000, 636 .pvr_mask = 0xffff0000,
539 .pvr_value = 0x80030000, 637 .pvr_value = 0x80030000,
540 .cpu_name = "7447A", 638 .cpu_name = "7447A",
541 .cpu_features = CPU_FTR_COMMON | 639 .cpu_features = CPU_FTRS_7447A,
542 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 640 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
543 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
544 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
545 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
546 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
547 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
548 .icache_bsize = 32, 641 .icache_bsize = 32,
549 .dcache_bsize = 32, 642 .dcache_bsize = 32,
550 .num_pmcs = 6, 643 .num_pmcs = 6,
@@ -554,13 +647,8 @@ struct cpu_spec cpu_specs[] = {
554 .pvr_mask = 0xffff0000, 647 .pvr_mask = 0xffff0000,
555 .pvr_value = 0x80040000, 648 .pvr_value = 0x80040000,
556 .cpu_name = "7448", 649 .cpu_name = "7448",
557 .cpu_features = CPU_FTR_COMMON | 650 .cpu_features = CPU_FTRS_7447A,
558 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 651 .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP,
559 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR |
560 CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE |
561 CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
562 CPU_FTR_HAS_HIGH_BATS | CPU_FTR_NEED_COHERENT,
563 .cpu_user_features = COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
564 .icache_bsize = 32, 652 .icache_bsize = 32,
565 .dcache_bsize = 32, 653 .dcache_bsize = 32,
566 .num_pmcs = 6, 654 .num_pmcs = 6,
@@ -570,10 +658,8 @@ struct cpu_spec cpu_specs[] = {
570 .pvr_mask = 0x7fff0000, 658 .pvr_mask = 0x7fff0000,
571 .pvr_value = 0x00810000, 659 .pvr_value = 0x00810000,
572 .cpu_name = "82xx", 660 .cpu_name = "82xx",
573 .cpu_features = CPU_FTR_COMMON | 661 .cpu_features = CPU_FTRS_82XX,
574 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | 662 .cpu_user_features = COMMON_USER,
575 CPU_FTR_USE_TB,
576 .cpu_user_features = COMMON_PPC,
577 .icache_bsize = 32, 663 .icache_bsize = 32,
578 .dcache_bsize = 32, 664 .dcache_bsize = 32,
579 .cpu_setup = __setup_cpu_603 665 .cpu_setup = __setup_cpu_603
@@ -582,10 +668,8 @@ struct cpu_spec cpu_specs[] = {
582 .pvr_mask = 0x7fff0000, 668 .pvr_mask = 0x7fff0000,
583 .pvr_value = 0x00820000, 669 .pvr_value = 0x00820000,
584 .cpu_name = "G2_LE", 670 .cpu_name = "G2_LE",
585 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 671 .cpu_features = CPU_FTRS_G2_LE,
586 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 672 .cpu_user_features = COMMON_USER,
587 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
588 .cpu_user_features = COMMON_PPC,
589 .icache_bsize = 32, 673 .icache_bsize = 32,
590 .dcache_bsize = 32, 674 .dcache_bsize = 32,
591 .cpu_setup = __setup_cpu_603 675 .cpu_setup = __setup_cpu_603
@@ -594,10 +678,8 @@ struct cpu_spec cpu_specs[] = {
594 .pvr_mask = 0x7fff0000, 678 .pvr_mask = 0x7fff0000,
595 .pvr_value = 0x00830000, 679 .pvr_value = 0x00830000,
596 .cpu_name = "e300", 680 .cpu_name = "e300",
597 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 681 .cpu_features = CPU_FTRS_E300,
598 CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | 682 .cpu_user_features = COMMON_USER,
599 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS,
600 .cpu_user_features = COMMON_PPC,
601 .icache_bsize = 32, 683 .icache_bsize = 32,
602 .dcache_bsize = 32, 684 .dcache_bsize = 32,
603 .cpu_setup = __setup_cpu_603 685 .cpu_setup = __setup_cpu_603
@@ -606,114 +688,12 @@ struct cpu_spec cpu_specs[] = {
606 .pvr_mask = 0x00000000, 688 .pvr_mask = 0x00000000,
607 .pvr_value = 0x00000000, 689 .pvr_value = 0x00000000,
608 .cpu_name = "(generic PPC)", 690 .cpu_name = "(generic PPC)",
609 .cpu_features = CPU_FTR_COMMON | 691 .cpu_features = CPU_FTRS_CLASSIC32,
610 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | 692 .cpu_user_features = COMMON_USER,
611 CPU_FTR_HPTE_TABLE,
612 .cpu_user_features = COMMON_PPC,
613 .icache_bsize = 32, 693 .icache_bsize = 32,
614 .dcache_bsize = 32, 694 .dcache_bsize = 32,
615 .cpu_setup = __setup_cpu_generic
616 }, 695 },
617#endif /* CLASSIC_PPC */ 696#endif /* CLASSIC_PPC */
618#ifdef CONFIG_PPC64BRIDGE
619 { /* Power3 */
620 .pvr_mask = 0xffff0000,
621 .pvr_value = 0x00400000,
622 .cpu_name = "Power3 (630)",
623 .cpu_features = CPU_FTR_COMMON |
624 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
625 CPU_FTR_HPTE_TABLE,
626 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
627 .icache_bsize = 128,
628 .dcache_bsize = 128,
629 .num_pmcs = 8,
630 .cpu_setup = __setup_cpu_power3
631 },
632 { /* Power3+ */
633 .pvr_mask = 0xffff0000,
634 .pvr_value = 0x00410000,
635 .cpu_name = "Power3 (630+)",
636 .cpu_features = CPU_FTR_COMMON |
637 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
638 CPU_FTR_HPTE_TABLE,
639 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
640 .icache_bsize = 128,
641 .dcache_bsize = 128,
642 .num_pmcs = 8,
643 .cpu_setup = __setup_cpu_power3
644 },
645 { /* I-star */
646 .pvr_mask = 0xffff0000,
647 .pvr_value = 0x00360000,
648 .cpu_name = "I-star",
649 .cpu_features = CPU_FTR_COMMON |
650 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
651 CPU_FTR_HPTE_TABLE,
652 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
653 .icache_bsize = 128,
654 .dcache_bsize = 128,
655 .num_pmcs = 8,
656 .cpu_setup = __setup_cpu_power3
657 },
658 { /* S-star */
659 .pvr_mask = 0xffff0000,
660 .pvr_value = 0x00370000,
661 .cpu_name = "S-star",
662 .cpu_features = CPU_FTR_COMMON |
663 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
664 CPU_FTR_HPTE_TABLE,
665 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
666 .icache_bsize = 128,
667 .dcache_bsize = 128,
668 .num_pmcs = 8,
669 .cpu_setup = __setup_cpu_power3
670 },
671#endif /* CONFIG_PPC64BRIDGE */
672#ifdef CONFIG_POWER4
673 { /* Power4 */
674 .pvr_mask = 0xffff0000,
675 .pvr_value = 0x00350000,
676 .cpu_name = "Power4",
677 .cpu_features = CPU_FTR_COMMON |
678 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
679 CPU_FTR_HPTE_TABLE,
680 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64,
681 .icache_bsize = 128,
682 .dcache_bsize = 128,
683 .num_pmcs = 8,
684 .cpu_setup = __setup_cpu_power4
685 },
686 { /* PPC970 */
687 .pvr_mask = 0xffff0000,
688 .pvr_value = 0x00390000,
689 .cpu_name = "PPC970",
690 .cpu_features = CPU_FTR_COMMON |
691 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
692 CPU_FTR_HPTE_TABLE |
693 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
694 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
695 PPC_FEATURE_ALTIVEC_COMP,
696 .icache_bsize = 128,
697 .dcache_bsize = 128,
698 .num_pmcs = 8,
699 .cpu_setup = __setup_cpu_ppc970
700 },
701 { /* PPC970FX */
702 .pvr_mask = 0xffff0000,
703 .pvr_value = 0x003c0000,
704 .cpu_name = "PPC970FX",
705 .cpu_features = CPU_FTR_COMMON |
706 CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB |
707 CPU_FTR_HPTE_TABLE |
708 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MAYBE_CAN_NAP,
709 .cpu_user_features = COMMON_PPC | PPC_FEATURE_64 |
710 PPC_FEATURE_ALTIVEC_COMP,
711 .icache_bsize = 128,
712 .dcache_bsize = 128,
713 .num_pmcs = 8,
714 .cpu_setup = __setup_cpu_ppc970
715 },
716#endif /* CONFIG_POWER4 */
717#ifdef CONFIG_8xx 697#ifdef CONFIG_8xx
718 { /* 8xx */ 698 { /* 8xx */
719 .pvr_mask = 0xffff0000, 699 .pvr_mask = 0xffff0000,
@@ -721,8 +701,7 @@ struct cpu_spec cpu_specs[] = {
721 .cpu_name = "8xx", 701 .cpu_name = "8xx",
722 /* CPU_FTR_MAYBE_CAN_DOZE is possible, 702 /* CPU_FTR_MAYBE_CAN_DOZE is possible,
723 * if the 8xx code is there.... */ 703 * if the 8xx code is there.... */
724 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 704 .cpu_features = CPU_FTRS_8XX,
725 CPU_FTR_USE_TB,
726 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 705 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
727 .icache_bsize = 16, 706 .icache_bsize = 16,
728 .dcache_bsize = 16, 707 .dcache_bsize = 16,
@@ -733,8 +712,7 @@ struct cpu_spec cpu_specs[] = {
733 .pvr_mask = 0xffffff00, 712 .pvr_mask = 0xffffff00,
734 .pvr_value = 0x00200200, 713 .pvr_value = 0x00200200,
735 .cpu_name = "403GC", 714 .cpu_name = "403GC",
736 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 715 .cpu_features = CPU_FTRS_40X,
737 CPU_FTR_USE_TB,
738 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 716 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
739 .icache_bsize = 16, 717 .icache_bsize = 16,
740 .dcache_bsize = 16, 718 .dcache_bsize = 16,
@@ -743,8 +721,7 @@ struct cpu_spec cpu_specs[] = {
743 .pvr_mask = 0xffffff00, 721 .pvr_mask = 0xffffff00,
744 .pvr_value = 0x00201400, 722 .pvr_value = 0x00201400,
745 .cpu_name = "403GCX", 723 .cpu_name = "403GCX",
746 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 724 .cpu_features = CPU_FTRS_40X,
747 CPU_FTR_USE_TB,
748 .cpu_user_features = PPC_FEATURE_32 | 725 .cpu_user_features = PPC_FEATURE_32 |
749 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, 726 PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
750 .icache_bsize = 16, 727 .icache_bsize = 16,
@@ -754,8 +731,7 @@ struct cpu_spec cpu_specs[] = {
754 .pvr_mask = 0xffff0000, 731 .pvr_mask = 0xffff0000,
755 .pvr_value = 0x00200000, 732 .pvr_value = 0x00200000,
756 .cpu_name = "403G ??", 733 .cpu_name = "403G ??",
757 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 734 .cpu_features = CPU_FTRS_40X,
758 CPU_FTR_USE_TB,
759 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 735 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
760 .icache_bsize = 16, 736 .icache_bsize = 16,
761 .dcache_bsize = 16, 737 .dcache_bsize = 16,
@@ -764,8 +740,7 @@ struct cpu_spec cpu_specs[] = {
764 .pvr_mask = 0xffff0000, 740 .pvr_mask = 0xffff0000,
765 .pvr_value = 0x40110000, 741 .pvr_value = 0x40110000,
766 .cpu_name = "405GP", 742 .cpu_name = "405GP",
767 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 743 .cpu_features = CPU_FTRS_40X,
768 CPU_FTR_USE_TB,
769 .cpu_user_features = PPC_FEATURE_32 | 744 .cpu_user_features = PPC_FEATURE_32 |
770 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 745 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
771 .icache_bsize = 32, 746 .icache_bsize = 32,
@@ -775,8 +750,7 @@ struct cpu_spec cpu_specs[] = {
775 .pvr_mask = 0xffff0000, 750 .pvr_mask = 0xffff0000,
776 .pvr_value = 0x40130000, 751 .pvr_value = 0x40130000,
777 .cpu_name = "STB03xxx", 752 .cpu_name = "STB03xxx",
778 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 753 .cpu_features = CPU_FTRS_40X,
779 CPU_FTR_USE_TB,
780 .cpu_user_features = PPC_FEATURE_32 | 754 .cpu_user_features = PPC_FEATURE_32 |
781 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 755 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
782 .icache_bsize = 32, 756 .icache_bsize = 32,
@@ -786,8 +760,7 @@ struct cpu_spec cpu_specs[] = {
786 .pvr_mask = 0xffff0000, 760 .pvr_mask = 0xffff0000,
787 .pvr_value = 0x41810000, 761 .pvr_value = 0x41810000,
788 .cpu_name = "STB04xxx", 762 .cpu_name = "STB04xxx",
789 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 763 .cpu_features = CPU_FTRS_40X,
790 CPU_FTR_USE_TB,
791 .cpu_user_features = PPC_FEATURE_32 | 764 .cpu_user_features = PPC_FEATURE_32 |
792 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 765 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
793 .icache_bsize = 32, 766 .icache_bsize = 32,
@@ -797,8 +770,7 @@ struct cpu_spec cpu_specs[] = {
797 .pvr_mask = 0xffff0000, 770 .pvr_mask = 0xffff0000,
798 .pvr_value = 0x41610000, 771 .pvr_value = 0x41610000,
799 .cpu_name = "NP405L", 772 .cpu_name = "NP405L",
800 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 773 .cpu_features = CPU_FTRS_40X,
801 CPU_FTR_USE_TB,
802 .cpu_user_features = PPC_FEATURE_32 | 774 .cpu_user_features = PPC_FEATURE_32 |
803 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 775 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
804 .icache_bsize = 32, 776 .icache_bsize = 32,
@@ -808,8 +780,7 @@ struct cpu_spec cpu_specs[] = {
808 .pvr_mask = 0xffff0000, 780 .pvr_mask = 0xffff0000,
809 .pvr_value = 0x40B10000, 781 .pvr_value = 0x40B10000,
810 .cpu_name = "NP4GS3", 782 .cpu_name = "NP4GS3",
811 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 783 .cpu_features = CPU_FTRS_40X,
812 CPU_FTR_USE_TB,
813 .cpu_user_features = PPC_FEATURE_32 | 784 .cpu_user_features = PPC_FEATURE_32 |
814 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 785 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
815 .icache_bsize = 32, 786 .icache_bsize = 32,
@@ -819,8 +790,7 @@ struct cpu_spec cpu_specs[] = {
819 .pvr_mask = 0xffff0000, 790 .pvr_mask = 0xffff0000,
820 .pvr_value = 0x41410000, 791 .pvr_value = 0x41410000,
821 .cpu_name = "NP405H", 792 .cpu_name = "NP405H",
822 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 793 .cpu_features = CPU_FTRS_40X,
823 CPU_FTR_USE_TB,
824 .cpu_user_features = PPC_FEATURE_32 | 794 .cpu_user_features = PPC_FEATURE_32 |
825 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 795 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
826 .icache_bsize = 32, 796 .icache_bsize = 32,
@@ -830,8 +800,7 @@ struct cpu_spec cpu_specs[] = {
830 .pvr_mask = 0xffff0000, 800 .pvr_mask = 0xffff0000,
831 .pvr_value = 0x50910000, 801 .pvr_value = 0x50910000,
832 .cpu_name = "405GPr", 802 .cpu_name = "405GPr",
833 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 803 .cpu_features = CPU_FTRS_40X,
834 CPU_FTR_USE_TB,
835 .cpu_user_features = PPC_FEATURE_32 | 804 .cpu_user_features = PPC_FEATURE_32 |
836 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 805 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
837 .icache_bsize = 32, 806 .icache_bsize = 32,
@@ -841,8 +810,7 @@ struct cpu_spec cpu_specs[] = {
841 .pvr_mask = 0xffff0000, 810 .pvr_mask = 0xffff0000,
842 .pvr_value = 0x51510000, 811 .pvr_value = 0x51510000,
843 .cpu_name = "STBx25xx", 812 .cpu_name = "STBx25xx",
844 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 813 .cpu_features = CPU_FTRS_40X,
845 CPU_FTR_USE_TB,
846 .cpu_user_features = PPC_FEATURE_32 | 814 .cpu_user_features = PPC_FEATURE_32 |
847 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 815 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
848 .icache_bsize = 32, 816 .icache_bsize = 32,
@@ -852,8 +820,7 @@ struct cpu_spec cpu_specs[] = {
852 .pvr_mask = 0xffff0000, 820 .pvr_mask = 0xffff0000,
853 .pvr_value = 0x41F10000, 821 .pvr_value = 0x41F10000,
854 .cpu_name = "405LP", 822 .cpu_name = "405LP",
855 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 823 .cpu_features = CPU_FTRS_40X,
856 CPU_FTR_USE_TB,
857 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 824 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
858 .icache_bsize = 32, 825 .icache_bsize = 32,
859 .dcache_bsize = 32, 826 .dcache_bsize = 32,
@@ -862,8 +829,7 @@ struct cpu_spec cpu_specs[] = {
862 .pvr_mask = 0xffff0000, 829 .pvr_mask = 0xffff0000,
863 .pvr_value = 0x20010000, 830 .pvr_value = 0x20010000,
864 .cpu_name = "Virtex-II Pro", 831 .cpu_name = "Virtex-II Pro",
865 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 832 .cpu_features = CPU_FTRS_40X,
866 CPU_FTR_USE_TB,
867 .cpu_user_features = PPC_FEATURE_32 | 833 .cpu_user_features = PPC_FEATURE_32 |
868 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 834 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
869 .icache_bsize = 32, 835 .icache_bsize = 32,
@@ -873,8 +839,7 @@ struct cpu_spec cpu_specs[] = {
873 .pvr_mask = 0xffff0000, 839 .pvr_mask = 0xffff0000,
874 .pvr_value = 0x51210000, 840 .pvr_value = 0x51210000,
875 .cpu_name = "405EP", 841 .cpu_name = "405EP",
876 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 842 .cpu_features = CPU_FTRS_40X,
877 CPU_FTR_USE_TB,
878 .cpu_user_features = PPC_FEATURE_32 | 843 .cpu_user_features = PPC_FEATURE_32 |
879 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, 844 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
880 .icache_bsize = 32, 845 .icache_bsize = 32,
@@ -887,9 +852,8 @@ struct cpu_spec cpu_specs[] = {
887 .pvr_mask = 0xf0000fff, 852 .pvr_mask = 0xf0000fff,
888 .pvr_value = 0x40000850, 853 .pvr_value = 0x40000850,
889 .cpu_name = "440EP Rev. A", 854 .cpu_name = "440EP Rev. A",
890 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 855 .cpu_features = CPU_FTRS_44X,
891 CPU_FTR_USE_TB, 856 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
892 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
893 .icache_bsize = 32, 857 .icache_bsize = 32,
894 .dcache_bsize = 32, 858 .dcache_bsize = 32,
895 }, 859 },
@@ -897,28 +861,25 @@ struct cpu_spec cpu_specs[] = {
897 .pvr_mask = 0xf0000fff, 861 .pvr_mask = 0xf0000fff,
898 .pvr_value = 0x400008d3, 862 .pvr_value = 0x400008d3,
899 .cpu_name = "440EP Rev. B", 863 .cpu_name = "440EP Rev. B",
900 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 864 .cpu_features = CPU_FTRS_44X,
901 CPU_FTR_USE_TB, 865 .cpu_user_features = COMMON_USER, /* 440EP has an FPU */
902 .cpu_user_features = COMMON_PPC, /* 440EP has an FPU */
903 .icache_bsize = 32, 866 .icache_bsize = 32,
904 .dcache_bsize = 32, 867 .dcache_bsize = 32,
905 }, 868 },
906 { /* 440GP Rev. B */ 869 { /* 440GP Rev. B */
907 .pvr_mask = 0xf0000fff, 870 .pvr_mask = 0xf0000fff,
908 .pvr_value = 0x40000440, 871 .pvr_value = 0x40000440,
909 .cpu_name = "440GP Rev. B", 872 .cpu_name = "440GP Rev. B",
910 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 873 .cpu_features = CPU_FTRS_44X,
911 CPU_FTR_USE_TB,
912 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 874 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
913 .icache_bsize = 32, 875 .icache_bsize = 32,
914 .dcache_bsize = 32, 876 .dcache_bsize = 32,
915 }, 877 },
916 { /* 440GP Rev. C */ 878 { /* 440GP Rev. C */
917 .pvr_mask = 0xf0000fff, 879 .pvr_mask = 0xf0000fff,
918 .pvr_value = 0x40000481, 880 .pvr_value = 0x40000481,
919 .cpu_name = "440GP Rev. C", 881 .cpu_name = "440GP Rev. C",
920 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 882 .cpu_features = CPU_FTRS_44X,
921 CPU_FTR_USE_TB,
922 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 883 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
923 .icache_bsize = 32, 884 .icache_bsize = 32,
924 .dcache_bsize = 32, 885 .dcache_bsize = 32,
@@ -927,8 +888,7 @@ struct cpu_spec cpu_specs[] = {
927 .pvr_mask = 0xf0000fff, 888 .pvr_mask = 0xf0000fff,
928 .pvr_value = 0x50000850, 889 .pvr_value = 0x50000850,
929 .cpu_name = "440GX Rev. A", 890 .cpu_name = "440GX Rev. A",
930 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 891 .cpu_features = CPU_FTRS_44X,
931 CPU_FTR_USE_TB,
932 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 892 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
933 .icache_bsize = 32, 893 .icache_bsize = 32,
934 .dcache_bsize = 32, 894 .dcache_bsize = 32,
@@ -937,8 +897,7 @@ struct cpu_spec cpu_specs[] = {
937 .pvr_mask = 0xf0000fff, 897 .pvr_mask = 0xf0000fff,
938 .pvr_value = 0x50000851, 898 .pvr_value = 0x50000851,
939 .cpu_name = "440GX Rev. B", 899 .cpu_name = "440GX Rev. B",
940 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 900 .cpu_features = CPU_FTRS_44X,
941 CPU_FTR_USE_TB,
942 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 901 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
943 .icache_bsize = 32, 902 .icache_bsize = 32,
944 .dcache_bsize = 32, 903 .dcache_bsize = 32,
@@ -947,8 +906,7 @@ struct cpu_spec cpu_specs[] = {
947 .pvr_mask = 0xf0000fff, 906 .pvr_mask = 0xf0000fff,
948 .pvr_value = 0x50000892, 907 .pvr_value = 0x50000892,
949 .cpu_name = "440GX Rev. C", 908 .cpu_name = "440GX Rev. C",
950 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 909 .cpu_features = CPU_FTRS_44X,
951 CPU_FTR_USE_TB,
952 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 910 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
953 .icache_bsize = 32, 911 .icache_bsize = 32,
954 .dcache_bsize = 32, 912 .dcache_bsize = 32,
@@ -957,8 +915,7 @@ struct cpu_spec cpu_specs[] = {
957 .pvr_mask = 0xf0000fff, 915 .pvr_mask = 0xf0000fff,
958 .pvr_value = 0x50000894, 916 .pvr_value = 0x50000894,
959 .cpu_name = "440GX Rev. F", 917 .cpu_name = "440GX Rev. F",
960 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 918 .cpu_features = CPU_FTRS_44X,
961 CPU_FTR_USE_TB,
962 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 919 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
963 .icache_bsize = 32, 920 .icache_bsize = 32,
964 .dcache_bsize = 32, 921 .dcache_bsize = 32,
@@ -967,44 +924,42 @@ struct cpu_spec cpu_specs[] = {
967 .pvr_mask = 0xff000fff, 924 .pvr_mask = 0xff000fff,
968 .pvr_value = 0x53000891, 925 .pvr_value = 0x53000891,
969 .cpu_name = "440SP Rev. A", 926 .cpu_name = "440SP Rev. A",
970 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 927 .cpu_features = CPU_FTRS_44X,
971 CPU_FTR_USE_TB,
972 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, 928 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
973 .icache_bsize = 32, 929 .icache_bsize = 32,
974 .dcache_bsize = 32, 930 .dcache_bsize = 32,
975 }, 931 },
976#endif /* CONFIG_44x */ 932#endif /* CONFIG_44x */
977#ifdef CONFIG_FSL_BOOKE 933#ifdef CONFIG_FSL_BOOKE
978 { /* e200z5 */ 934 { /* e200z5 */
979 .pvr_mask = 0xfff00000, 935 .pvr_mask = 0xfff00000,
980 .pvr_value = 0x81000000, 936 .pvr_value = 0x81000000,
981 .cpu_name = "e200z5", 937 .cpu_name = "e200z5",
982 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 938 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
983 .cpu_features = CPU_FTR_USE_TB, 939 .cpu_features = CPU_FTRS_E200,
984 .cpu_user_features = PPC_FEATURE_32 | 940 .cpu_user_features = PPC_FEATURE_32 |
985 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE | 941 PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
986 PPC_FEATURE_UNIFIED_CACHE, 942 PPC_FEATURE_UNIFIED_CACHE,
987 .dcache_bsize = 32, 943 .dcache_bsize = 32,
988 }, 944 },
989 { /* e200z6 */ 945 { /* e200z6 */
990 .pvr_mask = 0xfff00000, 946 .pvr_mask = 0xfff00000,
991 .pvr_value = 0x81100000, 947 .pvr_value = 0x81100000,
992 .cpu_name = "e200z6", 948 .cpu_name = "e200z6",
993 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 949 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
994 .cpu_features = CPU_FTR_USE_TB, 950 .cpu_features = CPU_FTRS_E200,
995 .cpu_user_features = PPC_FEATURE_32 | 951 .cpu_user_features = PPC_FEATURE_32 |
996 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 952 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
997 PPC_FEATURE_HAS_EFP_SINGLE | 953 PPC_FEATURE_HAS_EFP_SINGLE |
998 PPC_FEATURE_UNIFIED_CACHE, 954 PPC_FEATURE_UNIFIED_CACHE,
999 .dcache_bsize = 32, 955 .dcache_bsize = 32,
1000 }, 956 },
1001 { /* e500 */ 957 { /* e500 */
1002 .pvr_mask = 0xffff0000, 958 .pvr_mask = 0xffff0000,
1003 .pvr_value = 0x80200000, 959 .pvr_value = 0x80200000,
1004 .cpu_name = "e500", 960 .cpu_name = "e500",
1005 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 961 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1006 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 962 .cpu_features = CPU_FTRS_E500,
1007 CPU_FTR_USE_TB,
1008 .cpu_user_features = PPC_FEATURE_32 | 963 .cpu_user_features = PPC_FEATURE_32 |
1009 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 964 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1010 PPC_FEATURE_HAS_EFP_SINGLE, 965 PPC_FEATURE_HAS_EFP_SINGLE,
@@ -1012,13 +967,12 @@ struct cpu_spec cpu_specs[] = {
1012 .dcache_bsize = 32, 967 .dcache_bsize = 32,
1013 .num_pmcs = 4, 968 .num_pmcs = 4,
1014 }, 969 },
1015 { /* e500v2 */ 970 { /* e500v2 */
1016 .pvr_mask = 0xffff0000, 971 .pvr_mask = 0xffff0000,
1017 .pvr_value = 0x80210000, 972 .pvr_value = 0x80210000,
1018 .cpu_name = "e500v2", 973 .cpu_name = "e500v2",
1019 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ 974 /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
1020 .cpu_features = CPU_FTR_SPLIT_ID_CACHE | 975 .cpu_features = CPU_FTRS_E500_2,
1021 CPU_FTR_USE_TB | CPU_FTR_BIG_PHYS,
1022 .cpu_user_features = PPC_FEATURE_32 | 976 .cpu_user_features = PPC_FEATURE_32 |
1023 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | 977 PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
1024 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, 978 PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
@@ -1032,10 +986,11 @@ struct cpu_spec cpu_specs[] = {
1032 .pvr_mask = 0x00000000, 986 .pvr_mask = 0x00000000,
1033 .pvr_value = 0x00000000, 987 .pvr_value = 0x00000000,
1034 .cpu_name = "(generic PPC)", 988 .cpu_name = "(generic PPC)",
1035 .cpu_features = CPU_FTR_COMMON, 989 .cpu_features = CPU_FTRS_GENERIC_32,
1036 .cpu_user_features = PPC_FEATURE_32, 990 .cpu_user_features = PPC_FEATURE_32,
1037 .icache_bsize = 32, 991 .icache_bsize = 32,
1038 .dcache_bsize = 32, 992 .dcache_bsize = 32,
1039 } 993 }
1040#endif /* !CLASSIC_PPC */ 994#endif /* !CLASSIC_PPC */
995#endif /* CONFIG_PPC32 */
1041}; 996};
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
new file mode 100644
index 000000000000..2e99ae41723c
--- /dev/null
+++ b/arch/powerpc/kernel/entry_32.S
@@ -0,0 +1,1000 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184/*
185 * Handle a system call.
186 */
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
203 li r11,0
204 stb r11,TI_SC_NOERR(r10)
205 lwz r11,TI_FLAGS(r10)
206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
207 bne- syscall_dotrace
208syscall_dotrace_cont:
209 cmplwi 0,r0,NR_syscalls
210 lis r10,sys_call_table@h
211 ori r10,r10,sys_call_table@l
212 slwi r0,r0,2
213 bge- 66f
214 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
215 mtlr r10
216 addi r9,r1,STACK_FRAME_OVERHEAD
217 PPC440EP_ERR42
218 blrl /* Call handler */
219 .globl ret_from_syscall
220ret_from_syscall:
221#ifdef SHOW_SYSCALLS
222 bl do_show_syscall_exit
223#endif
224 mr r6,r3
225 li r11,-_LAST_ERRNO
226 cmplw 0,r3,r11
227 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
228 blt+ 30f
229 lbz r11,TI_SC_NOERR(r12)
230 cmpwi r11,0
231 bne 30f
232 neg r3,r3
233 lwz r10,_CCR(r1) /* Set SO bit in CR */
234 oris r10,r10,0x1000
235 stw r10,_CCR(r1)
236
237 /* disable interrupts so current_thread_info()->flags can't change */
23830: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
239 SYNC
240 MTMSRD(r10)
241 lwz r9,TI_FLAGS(r12)
242 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED)
243 bne- syscall_exit_work
244syscall_exit_cont:
245#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
246 /* If the process has its own DBCR0 value, load it up. The single
247 step bit tells us that dbcr0 should be loaded. */
248 lwz r0,THREAD+THREAD_DBCR0(r2)
249 andis. r10,r0,DBCR0_IC@h
250 bnel- load_dbcr0
251#endif
252 stwcx. r0,0,r1 /* to clear the reservation */
253 lwz r4,_LINK(r1)
254 lwz r5,_CCR(r1)
255 mtlr r4
256 mtcr r5
257 lwz r7,_NIP(r1)
258 lwz r8,_MSR(r1)
259 FIX_SRR1(r8, r0)
260 lwz r2,GPR2(r1)
261 lwz r1,GPR1(r1)
262 mtspr SPRN_SRR0,r7
263 mtspr SPRN_SRR1,r8
264 SYNC
265 RFI
266
26766: li r3,-ENOSYS
268 b ret_from_syscall
269
270 .globl ret_from_fork
271ret_from_fork:
272 REST_NVGPRS(r1)
273 bl schedule_tail
274 li r3,0
275 b ret_from_syscall
276
277/* Traced system call support */
278syscall_dotrace:
279 SAVE_NVGPRS(r1)
280 li r0,0xc00
281 stw r0,_TRAP(r1)
282 addi r3,r1,STACK_FRAME_OVERHEAD
283 bl do_syscall_trace_enter
284 lwz r0,GPR0(r1) /* Restore original registers */
285 lwz r3,GPR3(r1)
286 lwz r4,GPR4(r1)
287 lwz r5,GPR5(r1)
288 lwz r6,GPR6(r1)
289 lwz r7,GPR7(r1)
290 lwz r8,GPR8(r1)
291 REST_NVGPRS(r1)
292 b syscall_dotrace_cont
293
294syscall_exit_work:
295 stw r6,RESULT(r1) /* Save result */
296 stw r3,GPR3(r1) /* Update return value */
297 andi. r0,r9,_TIF_SYSCALL_T_OR_A
298 beq 5f
299 ori r10,r10,MSR_EE
300 SYNC
301 MTMSRD(r10) /* re-enable interrupts */
302 lwz r4,_TRAP(r1)
303 andi. r4,r4,1
304 beq 4f
305 SAVE_NVGPRS(r1)
306 li r4,0xc00
307 stw r4,_TRAP(r1)
3084:
309 addi r3,r1,STACK_FRAME_OVERHEAD
310 bl do_syscall_trace_leave
311 REST_NVGPRS(r1)
3122:
313 lwz r3,GPR3(r1)
314 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
315 SYNC
316 MTMSRD(r10) /* disable interrupts again */
317 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
318 lwz r9,TI_FLAGS(r12)
3195:
320 andi. r0,r9,_TIF_NEED_RESCHED
321 bne 1f
322 lwz r5,_MSR(r1)
323 andi. r5,r5,MSR_PR
324 beq syscall_exit_cont
325 andi. r0,r9,_TIF_SIGPENDING
326 beq syscall_exit_cont
327 b do_user_signal
3281:
329 ori r10,r10,MSR_EE
330 SYNC
331 MTMSRD(r10) /* re-enable interrupts */
332 bl schedule
333 b 2b
334
335#ifdef SHOW_SYSCALLS
336do_show_syscall:
337#ifdef SHOW_SYSCALLS_TASK
338 lis r11,show_syscalls_task@ha
339 lwz r11,show_syscalls_task@l(r11)
340 cmp 0,r2,r11
341 bnelr
342#endif
343 stw r31,GPR31(r1)
344 mflr r31
345 lis r3,7f@ha
346 addi r3,r3,7f@l
347 lwz r4,GPR0(r1)
348 lwz r5,GPR3(r1)
349 lwz r6,GPR4(r1)
350 lwz r7,GPR5(r1)
351 lwz r8,GPR6(r1)
352 lwz r9,GPR7(r1)
353 bl printk
354 lis r3,77f@ha
355 addi r3,r3,77f@l
356 lwz r4,GPR8(r1)
357 mr r5,r2
358 bl printk
359 lwz r0,GPR0(r1)
360 lwz r3,GPR3(r1)
361 lwz r4,GPR4(r1)
362 lwz r5,GPR5(r1)
363 lwz r6,GPR6(r1)
364 lwz r7,GPR7(r1)
365 lwz r8,GPR8(r1)
366 mtlr r31
367 lwz r31,GPR31(r1)
368 blr
369
370do_show_syscall_exit:
371#ifdef SHOW_SYSCALLS_TASK
372 lis r11,show_syscalls_task@ha
373 lwz r11,show_syscalls_task@l(r11)
374 cmp 0,r2,r11
375 bnelr
376#endif
377 stw r31,GPR31(r1)
378 mflr r31
379 stw r3,RESULT(r1) /* Save result */
380 mr r4,r3
381 lis r3,79f@ha
382 addi r3,r3,79f@l
383 bl printk
384 lwz r3,RESULT(r1)
385 mtlr r31
386 lwz r31,GPR31(r1)
387 blr
388
3897: .string "syscall %d(%x, %x, %x, %x, %x, "
39077: .string "%x), current=%p\n"
39179: .string " -> %x\n"
392 .align 2,0
393
394#ifdef SHOW_SYSCALLS_TASK
395 .data
396 .globl show_syscalls_task
397show_syscalls_task:
398 .long -1
399 .text
400#endif
401#endif /* SHOW_SYSCALLS */
402
403/*
404 * The sigsuspend and rt_sigsuspend system calls can call do_signal
405 * and thus put the process into the stopped state where we might
406 * want to examine its user state with ptrace. Therefore we need
407 * to save all the nonvolatile registers (r13 - r31) before calling
408 * the C code.
409 */
410 .globl ppc_sigsuspend
411ppc_sigsuspend:
412 SAVE_NVGPRS(r1)
413 lwz r0,_TRAP(r1)
414 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
415 stw r0,_TRAP(r1) /* register set saved */
416 b sys_sigsuspend
417
418 .globl ppc_rt_sigsuspend
419ppc_rt_sigsuspend:
420 SAVE_NVGPRS(r1)
421 lwz r0,_TRAP(r1)
422 rlwinm r0,r0,0,0,30
423 stw r0,_TRAP(r1)
424 b sys_rt_sigsuspend
425
426 .globl ppc_fork
427ppc_fork:
428 SAVE_NVGPRS(r1)
429 lwz r0,_TRAP(r1)
430 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
431 stw r0,_TRAP(r1) /* register set saved */
432 b sys_fork
433
434 .globl ppc_vfork
435ppc_vfork:
436 SAVE_NVGPRS(r1)
437 lwz r0,_TRAP(r1)
438 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
439 stw r0,_TRAP(r1) /* register set saved */
440 b sys_vfork
441
442 .globl ppc_clone
443ppc_clone:
444 SAVE_NVGPRS(r1)
445 lwz r0,_TRAP(r1)
446 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
447 stw r0,_TRAP(r1) /* register set saved */
448 b sys_clone
449
450 .globl ppc_swapcontext
451ppc_swapcontext:
452 SAVE_NVGPRS(r1)
453 lwz r0,_TRAP(r1)
454 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
455 stw r0,_TRAP(r1) /* register set saved */
456 b sys_swapcontext
457
458/*
459 * Top-level page fault handling.
460 * This is in assembler because if do_page_fault tells us that
461 * it is a bad kernel page fault, we want to save the non-volatile
462 * registers before calling bad_page_fault.
463 */
464 .globl handle_page_fault
465handle_page_fault:
466 stw r4,_DAR(r1)
467 addi r3,r1,STACK_FRAME_OVERHEAD
468 bl do_page_fault
469 cmpwi r3,0
470 beq+ ret_from_except
471 SAVE_NVGPRS(r1)
472 lwz r0,_TRAP(r1)
473 clrrwi r0,r0,1
474 stw r0,_TRAP(r1)
475 mr r5,r3
476 addi r3,r1,STACK_FRAME_OVERHEAD
477 lwz r4,_DAR(r1)
478 bl bad_page_fault
479 b ret_from_except_full
480
481/*
482 * This routine switches between two different tasks. The process
483 * state of one is saved on its kernel stack. Then the state
484 * of the other is restored from its kernel stack. The memory
485 * management hardware is updated to the second process's state.
486 * Finally, we can return to the second process.
487 * On entry, r3 points to the THREAD for the current task, r4
488 * points to the THREAD for the new task.
489 *
490 * This routine is always called with interrupts disabled.
491 *
492 * Note: there are two ways to get to the "going out" portion
493 * of this code; either by coming in via the entry (_switch)
494 * or via "fork" which must set up an environment equivalent
495 * to the "_switch" path. If you change this , you'll have to
496 * change the fork code also.
497 *
498 * The code which creates the new task context is in 'copy_thread'
499 * in arch/ppc/kernel/process.c
500 */
501_GLOBAL(_switch)
502 stwu r1,-INT_FRAME_SIZE(r1)
503 mflr r0
504 stw r0,INT_FRAME_SIZE+4(r1)
505 /* r3-r12 are caller saved -- Cort */
506 SAVE_NVGPRS(r1)
507 stw r0,_NIP(r1) /* Return to switch caller */
508 mfmsr r11
509 li r0,MSR_FP /* Disable floating-point */
510#ifdef CONFIG_ALTIVEC
511BEGIN_FTR_SECTION
512 oris r0,r0,MSR_VEC@h /* Disable altivec */
513 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
514 stw r12,THREAD+THREAD_VRSAVE(r2)
515END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
516#endif /* CONFIG_ALTIVEC */
517#ifdef CONFIG_SPE
518 oris r0,r0,MSR_SPE@h /* Disable SPE */
519 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
520 stw r12,THREAD+THREAD_SPEFSCR(r2)
521#endif /* CONFIG_SPE */
522 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
523 beq+ 1f
524 andc r11,r11,r0
525 MTMSRD(r11)
526 isync
5271: stw r11,_MSR(r1)
528 mfcr r10
529 stw r10,_CCR(r1)
530 stw r1,KSP(r3) /* Set old stack pointer */
531
532#ifdef CONFIG_SMP
533 /* We need a sync somewhere here to make sure that if the
534 * previous task gets rescheduled on another CPU, it sees all
535 * stores it has performed on this one.
536 */
537 sync
538#endif /* CONFIG_SMP */
539
540 tophys(r0,r4)
541 CLR_TOP32(r0)
542 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
543 lwz r1,KSP(r4) /* Load new stack pointer */
544
545 /* save the old current 'last' for return value */
546 mr r3,r2
547 addi r2,r4,-THREAD /* Update current */
548
549#ifdef CONFIG_ALTIVEC
550BEGIN_FTR_SECTION
551 lwz r0,THREAD+THREAD_VRSAVE(r2)
552 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
553END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
554#endif /* CONFIG_ALTIVEC */
555#ifdef CONFIG_SPE
556 lwz r0,THREAD+THREAD_SPEFSCR(r2)
557 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
558#endif /* CONFIG_SPE */
559
560 lwz r0,_CCR(r1)
561 mtcrf 0xFF,r0
562 /* r3-r12 are destroyed -- Cort */
563 REST_NVGPRS(r1)
564
565 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
566 mtlr r4
567 addi r1,r1,INT_FRAME_SIZE
568 blr
569
570 .globl fast_exception_return
571fast_exception_return:
572#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
573 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
574 beq 1f /* if not, we've got problems */
575#endif
576
5772: REST_4GPRS(3, r11)
578 lwz r10,_CCR(r11)
579 REST_GPR(1, r11)
580 mtcr r10
581 lwz r10,_LINK(r11)
582 mtlr r10
583 REST_GPR(10, r11)
584 mtspr SPRN_SRR1,r9
585 mtspr SPRN_SRR0,r12
586 REST_GPR(9, r11)
587 REST_GPR(12, r11)
588 lwz r11,GPR11(r11)
589 SYNC
590 RFI
591
592#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
593/* check if the exception happened in a restartable section */
5941: lis r3,exc_exit_restart_end@ha
595 addi r3,r3,exc_exit_restart_end@l
596 cmplw r12,r3
597 bge 3f
598 lis r4,exc_exit_restart@ha
599 addi r4,r4,exc_exit_restart@l
600 cmplw r12,r4
601 blt 3f
602 lis r3,fee_restarts@ha
603 tophys(r3,r3)
604 lwz r5,fee_restarts@l(r3)
605 addi r5,r5,1
606 stw r5,fee_restarts@l(r3)
607 mr r12,r4 /* restart at exc_exit_restart */
608 b 2b
609
610 .comm fee_restarts,4
611
612/* aargh, a nonrecoverable interrupt, panic */
613/* aargh, we don't know which trap this is */
614/* but the 601 doesn't implement the RI bit, so assume it's OK */
6153:
616BEGIN_FTR_SECTION
617 b 2b
618END_FTR_SECTION_IFSET(CPU_FTR_601)
619 li r10,-1
620 stw r10,_TRAP(r11)
621 addi r3,r1,STACK_FRAME_OVERHEAD
622 lis r10,MSR_KERNEL@h
623 ori r10,r10,MSR_KERNEL@l
624 bl transfer_to_handler_full
625 .long nonrecoverable_exception
626 .long ret_from_except
627#endif
628
629 .globl sigreturn_exit
630sigreturn_exit:
631 subi r1,r3,STACK_FRAME_OVERHEAD
632 rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
633 lwz r9,TI_FLAGS(r12)
634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */
638
639 .globl ret_from_except_full
640ret_from_except_full:
641 REST_NVGPRS(r1)
642 /* fall through */
643
644 .globl ret_from_except
645ret_from_except:
646 /* Hard-disable interrupts so that current_thread_info()->flags
647 * can't change between when we test it and when we return
648 * from the interrupt. */
649 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
650 SYNC /* Some chip revs have problems here... */
651 MTMSRD(r10) /* disable interrupts */
652
653 lwz r3,_MSR(r1) /* Returning to user mode? */
654 andi. r0,r3,MSR_PR
655 beq resume_kernel
656
657user_exc_return: /* r10 contains MSR_KERNEL here */
658 /* Check current_thread_info()->flags */
659 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
660 lwz r9,TI_FLAGS(r9)
661 andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED)
662 bne do_work
663
664restore_user:
665#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
666 /* Check whether this process has its own DBCR0 value. The single
667 step bit tells us that dbcr0 should be loaded. */
668 lwz r0,THREAD+THREAD_DBCR0(r2)
669 andis. r10,r0,DBCR0_IC@h
670 bnel- load_dbcr0
671#endif
672
673#ifdef CONFIG_PREEMPT
674 b restore
675
676/* N.B. the only way to get here is from the beq following ret_from_except. */
677resume_kernel:
678 /* check current_thread_info->preempt_count */
679 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
680 lwz r0,TI_PREEMPT(r9)
681 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
682 bne restore
683 lwz r0,TI_FLAGS(r9)
684 andi. r0,r0,_TIF_NEED_RESCHED
685 beq+ restore
686 andi. r0,r3,MSR_EE /* interrupts off? */
687 beq restore /* don't schedule if so */
6881: bl preempt_schedule_irq
689 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
690 lwz r3,TI_FLAGS(r9)
691 andi. r0,r3,_TIF_NEED_RESCHED
692 bne- 1b
693#else
694resume_kernel:
695#endif /* CONFIG_PREEMPT */
696
697 /* interrupts are hard-disabled at this point */
698restore:
699 lwz r0,GPR0(r1)
700 lwz r2,GPR2(r1)
701 REST_4GPRS(3, r1)
702 REST_2GPRS(7, r1)
703
704 lwz r10,_XER(r1)
705 lwz r11,_CTR(r1)
706 mtspr SPRN_XER,r10
707 mtctr r11
708
709 PPC405_ERR77(0,r1)
710 stwcx. r0,0,r1 /* to clear the reservation */
711
712#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
713 lwz r9,_MSR(r1)
714 andi. r10,r9,MSR_RI /* check if this exception occurred */
715 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
716
717 lwz r10,_CCR(r1)
718 lwz r11,_LINK(r1)
719 mtcrf 0xFF,r10
720 mtlr r11
721
722 /*
723 * Once we put values in SRR0 and SRR1, we are in a state
724 * where exceptions are not recoverable, since taking an
725 * exception will trash SRR0 and SRR1. Therefore we clear the
726 * MSR:RI bit to indicate this. If we do take an exception,
727 * we can't return to the point of the exception but we
728 * can restart the exception exit path at the label
729 * exc_exit_restart below. -- paulus
730 */
731 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
732 SYNC
733 MTMSRD(r10) /* clear the RI bit */
734 .globl exc_exit_restart
735exc_exit_restart:
736 lwz r9,_MSR(r1)
737 lwz r12,_NIP(r1)
738 FIX_SRR1(r9,r10)
739 mtspr SPRN_SRR0,r12
740 mtspr SPRN_SRR1,r9
741 REST_4GPRS(9, r1)
742 lwz r1,GPR1(r1)
743 .globl exc_exit_restart_end
744exc_exit_restart_end:
745 SYNC
746 RFI
747
748#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
749 /*
750 * This is a bit different on 4xx/Book-E because it doesn't have
751 * the RI bit in the MSR.
752 * The TLB miss handler checks if we have interrupted
753 * the exception exit path and restarts it if so
754 * (well maybe one day it will... :).
755 */
756 lwz r11,_LINK(r1)
757 mtlr r11
758 lwz r10,_CCR(r1)
759 mtcrf 0xff,r10
760 REST_2GPRS(9, r1)
761 .globl exc_exit_restart
762exc_exit_restart:
763 lwz r11,_NIP(r1)
764 lwz r12,_MSR(r1)
765exc_exit_start:
766 mtspr SPRN_SRR0,r11
767 mtspr SPRN_SRR1,r12
768 REST_2GPRS(11, r1)
769 lwz r1,GPR1(r1)
770 .globl exc_exit_restart_end
771exc_exit_restart_end:
772 PPC405_ERR77_SYNC
773 rfi
774 b . /* prevent prefetch past rfi */
775
776/*
777 * Returning from a critical interrupt in user mode doesn't need
778 * to be any different from a normal exception. For a critical
779 * interrupt in the kernel, we just return (without checking for
780 * preemption) since the interrupt may have happened at some crucial
781 * place (e.g. inside the TLB miss handler), and because we will be
782 * running with r1 pointing into critical_stack, not the current
783 * process's kernel stack (and therefore current_thread_info() will
784 * give the wrong answer).
785 * We have to restore various SPRs that may have been in use at the
786 * time of the critical interrupt.
787 *
788 */
789#ifdef CONFIG_40x
790#define PPC_40x_TURN_OFF_MSR_DR \
791 /* avoid any possible TLB misses here by turning off MSR.DR, we \
792 * assume the instructions here are mapped by a pinned TLB entry */ \
793 li r10,MSR_IR; \
794 mtmsr r10; \
795 isync; \
796 tophys(r1, r1);
797#else
798#define PPC_40x_TURN_OFF_MSR_DR
799#endif
800
801#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
802 REST_NVGPRS(r1); \
803 lwz r3,_MSR(r1); \
804 andi. r3,r3,MSR_PR; \
805 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
806 bne user_exc_return; \
807 lwz r0,GPR0(r1); \
808 lwz r2,GPR2(r1); \
809 REST_4GPRS(3, r1); \
810 REST_2GPRS(7, r1); \
811 lwz r10,_XER(r1); \
812 lwz r11,_CTR(r1); \
813 mtspr SPRN_XER,r10; \
814 mtctr r11; \
815 PPC405_ERR77(0,r1); \
816 stwcx. r0,0,r1; /* to clear the reservation */ \
817 lwz r11,_LINK(r1); \
818 mtlr r11; \
819 lwz r10,_CCR(r1); \
820 mtcrf 0xff,r10; \
821 PPC_40x_TURN_OFF_MSR_DR; \
822 lwz r9,_DEAR(r1); \
823 lwz r10,_ESR(r1); \
824 mtspr SPRN_DEAR,r9; \
825 mtspr SPRN_ESR,r10; \
826 lwz r11,_NIP(r1); \
827 lwz r12,_MSR(r1); \
828 mtspr exc_lvl_srr0,r11; \
829 mtspr exc_lvl_srr1,r12; \
830 lwz r9,GPR9(r1); \
831 lwz r12,GPR12(r1); \
832 lwz r10,GPR10(r1); \
833 lwz r11,GPR11(r1); \
834 lwz r1,GPR1(r1); \
835 PPC405_ERR77_SYNC; \
836 exc_lvl_rfi; \
837 b .; /* prevent prefetch past exc_lvl_rfi */
838
839 .globl ret_from_crit_exc
840ret_from_crit_exc:
841 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
842
843#ifdef CONFIG_BOOKE
844 .globl ret_from_debug_exc
845ret_from_debug_exc:
846 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
847
848 .globl ret_from_mcheck_exc
849ret_from_mcheck_exc:
850 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
851#endif /* CONFIG_BOOKE */
852
853/*
854 * Load the DBCR0 value for a task that is being ptraced,
855 * having first saved away the global DBCR0. Note that r0
856 * has the dbcr0 value to set upon entry to this.
857 */
858load_dbcr0:
859 mfmsr r10 /* first disable debug exceptions */
860 rlwinm r10,r10,0,~MSR_DE
861 mtmsr r10
862 isync
863 mfspr r10,SPRN_DBCR0
864 lis r11,global_dbcr0@ha
865 addi r11,r11,global_dbcr0@l
866 stw r10,0(r11)
867 mtspr SPRN_DBCR0,r0
868 lwz r10,4(r11)
869 addi r10,r10,1
870 stw r10,4(r11)
871 li r11,-1
872 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
873 blr
874
875 .comm global_dbcr0,8
876#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
877
878do_work: /* r10 contains MSR_KERNEL here */
879 andi. r0,r9,_TIF_NEED_RESCHED
880 beq do_user_signal
881
882do_resched: /* r10 contains MSR_KERNEL here */
883 ori r10,r10,MSR_EE
884 SYNC
885 MTMSRD(r10) /* hard-enable interrupts */
886 bl schedule
887recheck:
888 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
889 SYNC
890 MTMSRD(r10) /* disable interrupts */
891 rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
892 lwz r9,TI_FLAGS(r9)
893 andi. r0,r9,_TIF_NEED_RESCHED
894 bne- do_resched
895 andi. r0,r9,_TIF_SIGPENDING
896 beq restore_user
897do_user_signal: /* r10 contains MSR_KERNEL here */
898 ori r10,r10,MSR_EE
899 SYNC
900 MTMSRD(r10) /* hard-enable interrupts */
901 /* save r13-r31 in the exception frame, if not already done */
902 lwz r3,_TRAP(r1)
903 andi. r0,r3,1
904 beq 2f
905 SAVE_NVGPRS(r1)
906 rlwinm r3,r3,0,0,30
907 stw r3,_TRAP(r1)
9082: li r3,0
909 addi r4,r1,STACK_FRAME_OVERHEAD
910 bl do_signal
911 REST_NVGPRS(r1)
912 b recheck
913
914/*
915 * We come here when we are at the end of handling an exception
916 * that occurred at a place where taking an exception will lose
917 * state information, such as the contents of SRR0 and SRR1.
918 */
919nonrecoverable:
920 lis r10,exc_exit_restart_end@ha
921 addi r10,r10,exc_exit_restart_end@l
922 cmplw r12,r10
923 bge 3f
924 lis r11,exc_exit_restart@ha
925 addi r11,r11,exc_exit_restart@l
926 cmplw r12,r11
927 blt 3f
928 lis r10,ee_restarts@ha
929 lwz r12,ee_restarts@l(r10)
930 addi r12,r12,1
931 stw r12,ee_restarts@l(r10)
932 mr r12,r11 /* restart at exc_exit_restart */
933 blr
9343: /* OK, we can't recover, kill this process */
935 /* but the 601 doesn't implement the RI bit, so assume it's OK */
936BEGIN_FTR_SECTION
937 blr
938END_FTR_SECTION_IFSET(CPU_FTR_601)
939 lwz r3,_TRAP(r1)
940 andi. r0,r3,1
941 beq 4f
942 SAVE_NVGPRS(r1)
943 rlwinm r3,r3,0,0,30
944 stw r3,_TRAP(r1)
9454: addi r3,r1,STACK_FRAME_OVERHEAD
946 bl nonrecoverable_exception
947 /* shouldn't return */
948 b 4b
949
950 .comm ee_restarts,4
951
952/*
953 * PROM code for specific machines follows. Put it
954 * here so it's easy to add arch-specific sections later.
955 * -- Cort
956 */
957#ifdef CONFIG_PPC_RTAS
958/*
959 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
960 * called with the MMU off.
961 */
962_GLOBAL(enter_rtas)
963 stwu r1,-INT_FRAME_SIZE(r1)
964 mflr r0
965 stw r0,INT_FRAME_SIZE+4(r1)
966 LOADADDR(r4, rtas)
967 lis r6,1f@ha /* physical return address for rtas */
968 addi r6,r6,1f@l
969 tophys(r6,r6)
970 tophys(r7,r1)
971 lwz r8,RTASENTRY(r4)
972 lwz r4,RTASBASE(r4)
973 mfmsr r9
974 stw r9,8(r1)
975 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
976 SYNC /* disable interrupts so SRR0/1 */
977 MTMSRD(r0) /* don't get trashed */
978 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
979 mtlr r6
980 mtspr SPRN_SPRG2,r7
981 mtspr SPRN_SRR0,r8
982 mtspr SPRN_SRR1,r9
983 RFI
9841: tophys(r9,r1)
985 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
986 lwz r9,8(r9) /* original msr value */
987 FIX_SRR1(r9,r0)
988 addi r1,r1,INT_FRAME_SIZE
989 li r0,0
990 mtspr SPRN_SPRG2,r0
991 mtspr SPRN_SRR0,r8
992 mtspr SPRN_SRR1,r9
993 RFI /* return to caller */
994
995 .globl machine_check_in_rtas
996machine_check_in_rtas:
997 twi 31,0,0
998 /* XXX load up BATs and panic */
999
1000#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/ppc64/kernel/entry.S b/arch/powerpc/kernel/entry_64.S
index e8c0bbf4d000..984a10630714 100644
--- a/arch/ppc64/kernel/entry.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -42,9 +42,6 @@
42.SYS_CALL_TABLE: 42.SYS_CALL_TABLE:
43 .tc .sys_call_table[TC],.sys_call_table 43 .tc .sys_call_table[TC],.sys_call_table
44 44
45.SYS_CALL_TABLE32:
46 .tc .sys_call_table32[TC],.sys_call_table32
47
48/* This value is used to mark exception frames on the stack. */ 45/* This value is used to mark exception frames on the stack. */
49exception_marker: 46exception_marker:
50 .tc ID_72656773_68657265[TC],0x7265677368657265 47 .tc ID_72656773_68657265[TC],0x7265677368657265
@@ -133,7 +130,7 @@ system_call: /* label this so stack traces look sane */
133 ld r11,.SYS_CALL_TABLE@toc(2) 130 ld r11,.SYS_CALL_TABLE@toc(2)
134 andi. r10,r10,_TIF_32BIT 131 andi. r10,r10,_TIF_32BIT
135 beq 15f 132 beq 15f
136 ld r11,.SYS_CALL_TABLE32@toc(2) 133 addi r11,r11,8 /* use 32-bit syscall entries */
137 clrldi r3,r3,32 134 clrldi r3,r3,32
138 clrldi r4,r4,32 135 clrldi r4,r4,32
139 clrldi r5,r5,32 136 clrldi r5,r5,32
@@ -141,7 +138,7 @@ system_call: /* label this so stack traces look sane */
141 clrldi r7,r7,32 138 clrldi r7,r7,32
142 clrldi r8,r8,32 139 clrldi r8,r8,32
14315: 14015:
144 slwi r0,r0,3 141 slwi r0,r0,4
145 ldx r10,r11,r0 /* Fetch system call handler [ptr] */ 142 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
146 mtctr r10 143 mtctr r10
147 bctrl /* Call handler */ 144 bctrl /* Call handler */
@@ -191,8 +188,8 @@ syscall_exit_trace_cont:
191 ld r1,GPR1(r1) 188 ld r1,GPR1(r1)
192 mtlr r4 189 mtlr r4
193 mtcr r5 190 mtcr r5
194 mtspr SRR0,r7 191 mtspr SPRN_SRR0,r7
195 mtspr SRR1,r8 192 mtspr SPRN_SRR1,r8
196 rfid 193 rfid
197 b . /* prevent speculative execution */ 194 b . /* prevent speculative execution */
198 195
@@ -265,7 +262,7 @@ _GLOBAL(save_nvgprs)
265 */ 262 */
266_GLOBAL(ppc32_sigsuspend) 263_GLOBAL(ppc32_sigsuspend)
267 bl .save_nvgprs 264 bl .save_nvgprs
268 bl .sys32_sigsuspend 265 bl .compat_sys_sigsuspend
269 b 70f 266 b 70f
270 267
271_GLOBAL(ppc64_rt_sigsuspend) 268_GLOBAL(ppc64_rt_sigsuspend)
@@ -275,7 +272,7 @@ _GLOBAL(ppc64_rt_sigsuspend)
275 272
276_GLOBAL(ppc32_rt_sigsuspend) 273_GLOBAL(ppc32_rt_sigsuspend)
277 bl .save_nvgprs 274 bl .save_nvgprs
278 bl .sys32_rt_sigsuspend 275 bl .compat_sys_rt_sigsuspend
27970: cmpdi 0,r3,0 27670: cmpdi 0,r3,0
280 /* If it returned an error, we need to return via syscall_exit to set 277 /* If it returned an error, we need to return via syscall_exit to set
281 the SO bit in cr0 and potentially stop for ptrace. */ 278 the SO bit in cr0 and potentially stop for ptrace. */
@@ -310,7 +307,7 @@ _GLOBAL(ppc_clone)
310 307
311_GLOBAL(ppc32_swapcontext) 308_GLOBAL(ppc32_swapcontext)
312 bl .save_nvgprs 309 bl .save_nvgprs
313 bl .sys32_swapcontext 310 bl .compat_sys_swapcontext
314 b 80f 311 b 80f
315 312
316_GLOBAL(ppc64_swapcontext) 313_GLOBAL(ppc64_swapcontext)
@@ -319,11 +316,11 @@ _GLOBAL(ppc64_swapcontext)
319 b 80f 316 b 80f
320 317
321_GLOBAL(ppc32_sigreturn) 318_GLOBAL(ppc32_sigreturn)
322 bl .sys32_sigreturn 319 bl .compat_sys_sigreturn
323 b 80f 320 b 80f
324 321
325_GLOBAL(ppc32_rt_sigreturn) 322_GLOBAL(ppc32_rt_sigreturn)
326 bl .sys32_rt_sigreturn 323 bl .compat_sys_rt_sigreturn
327 b 80f 324 b 80f
328 325
329_GLOBAL(ppc64_rt_sigreturn) 326_GLOBAL(ppc64_rt_sigreturn)
@@ -531,7 +528,7 @@ restore:
531 mtctr r3 528 mtctr r3
532 mtlr r0 529 mtlr r0
533 ld r3,_XER(r1) 530 ld r3,_XER(r1)
534 mtspr XER,r3 531 mtspr SPRN_XER,r3
535 532
536 REST_8GPRS(5, r1) 533 REST_8GPRS(5, r1)
537 534
@@ -543,12 +540,12 @@ restore:
543 mtmsrd r0,1 540 mtmsrd r0,1
544 541
545 ld r0,_MSR(r1) 542 ld r0,_MSR(r1)
546 mtspr SRR1,r0 543 mtspr SPRN_SRR1,r0
547 544
548 ld r2,_CCR(r1) 545 ld r2,_CCR(r1)
549 mtcrf 0xFF,r2 546 mtcrf 0xFF,r2
550 ld r2,_NIP(r1) 547 ld r2,_NIP(r1)
551 mtspr SRR0,r2 548 mtspr SPRN_SRR0,r2
552 549
553 ld r0,GPR0(r1) 550 ld r0,GPR0(r1)
554 ld r2,GPR2(r1) 551 ld r2,GPR2(r1)
@@ -643,7 +640,7 @@ _GLOBAL(enter_rtas)
643 std r4,_CCR(r1) 640 std r4,_CCR(r1)
644 mfctr r5 641 mfctr r5
645 std r5,_CTR(r1) 642 std r5,_CTR(r1)
646 mfspr r6,XER 643 mfspr r6,SPRN_XER
647 std r6,_XER(r1) 644 std r6,_XER(r1)
648 mfdar r7 645 mfdar r7
649 std r7,_DAR(r1) 646 std r7,_DAR(r1)
@@ -697,14 +694,14 @@ _GLOBAL(enter_rtas)
697 ld r5,RTASENTRY(r4) /* get the rtas->entry value */ 694 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
698 ld r4,RTASBASE(r4) /* get the rtas->base value */ 695 ld r4,RTASBASE(r4) /* get the rtas->base value */
699 696
700 mtspr SRR0,r5 697 mtspr SPRN_SRR0,r5
701 mtspr SRR1,r6 698 mtspr SPRN_SRR1,r6
702 rfid 699 rfid
703 b . /* prevent speculative execution */ 700 b . /* prevent speculative execution */
704 701
705_STATIC(rtas_return_loc) 702_STATIC(rtas_return_loc)
706 /* relocation is off at this point */ 703 /* relocation is off at this point */
707 mfspr r4,SPRG3 /* Get PACA */ 704 mfspr r4,SPRN_SPRG3 /* Get PACA */
708 SET_REG_TO_CONST(r5, KERNELBASE) 705 SET_REG_TO_CONST(r5, KERNELBASE)
709 sub r4,r4,r5 /* RELOC the PACA base pointer */ 706 sub r4,r4,r5 /* RELOC the PACA base pointer */
710 707
@@ -718,8 +715,8 @@ _STATIC(rtas_return_loc)
718 LOADADDR(r3,.rtas_restore_regs) 715 LOADADDR(r3,.rtas_restore_regs)
719 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ 716 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
720 717
721 mtspr SRR0,r3 718 mtspr SPRN_SRR0,r3
722 mtspr SRR1,r4 719 mtspr SPRN_SRR1,r4
723 rfid 720 rfid
724 b . /* prevent speculative execution */ 721 b . /* prevent speculative execution */
725 722
@@ -730,14 +727,14 @@ _STATIC(rtas_restore_regs)
730 REST_8GPRS(14, r1) /* Restore the non-volatiles */ 727 REST_8GPRS(14, r1) /* Restore the non-volatiles */
731 REST_10GPRS(22, r1) /* ditto */ 728 REST_10GPRS(22, r1) /* ditto */
732 729
733 mfspr r13,SPRG3 730 mfspr r13,SPRN_SPRG3
734 731
735 ld r4,_CCR(r1) 732 ld r4,_CCR(r1)
736 mtcr r4 733 mtcr r4
737 ld r5,_CTR(r1) 734 ld r5,_CTR(r1)
738 mtctr r5 735 mtctr r5
739 ld r6,_XER(r1) 736 ld r6,_XER(r1)
740 mtspr XER,r6 737 mtspr SPRN_XER,r6
741 ld r7,_DAR(r1) 738 ld r7,_DAR(r1)
742 mtdar r7 739 mtdar r7
743 ld r8,_DSISR(r1) 740 ld r8,_DSISR(r1)
@@ -774,7 +771,7 @@ _GLOBAL(enter_prom)
774 std r4,_CCR(r1) 771 std r4,_CCR(r1)
775 mfctr r5 772 mfctr r5
776 std r5,_CTR(r1) 773 std r5,_CTR(r1)
777 mfspr r6,XER 774 mfspr r6,SPRN_XER
778 std r6,_XER(r1) 775 std r6,_XER(r1)
779 mfdar r7 776 mfdar r7
780 std r7,_DAR(r1) 777 std r7,_DAR(r1)
@@ -827,7 +824,7 @@ _GLOBAL(enter_prom)
827 ld r5,_CTR(r1) 824 ld r5,_CTR(r1)
828 mtctr r5 825 mtctr r5
829 ld r6,_XER(r1) 826 ld r6,_XER(r1)
830 mtspr XER,r6 827 mtspr SPRN_XER,r6
831 ld r7,_DAR(r1) 828 ld r7,_DAR(r1)
832 mtdar r7 829 mtdar r7
833 ld r8,_DSISR(r1) 830 ld r8,_DSISR(r1)
diff --git a/arch/ppc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 665d7d34304c..4d6001fa1cf2 100644
--- a/arch/ppc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
@@ -27,13 +27,9 @@
27 * Load up this task's FP registers from its thread_struct, 27 * Load up this task's FP registers from its thread_struct,
28 * enable the FPU for the current task and return to the task. 28 * enable the FPU for the current task and return to the task.
29 */ 29 */
30 .globl load_up_fpu 30_GLOBAL(load_up_fpu)
31load_up_fpu:
32 mfmsr r5 31 mfmsr r5
33 ori r5,r5,MSR_FP 32 ori r5,r5,MSR_FP
34#ifdef CONFIG_PPC64BRIDGE
35 clrldi r5,r5,1 /* turn off 64-bit mode */
36#endif /* CONFIG_PPC64BRIDGE */
37 SYNC 33 SYNC
38 MTMSRD(r5) /* enable use of fpu now */ 34 MTMSRD(r5) /* enable use of fpu now */
39 isync 35 isync
@@ -43,67 +39,57 @@ load_up_fpu:
43 * to another. Instead we call giveup_fpu in switch_to. 39 * to another. Instead we call giveup_fpu in switch_to.
44 */ 40 */
45#ifndef CONFIG_SMP 41#ifndef CONFIG_SMP
46 tophys(r6,0) /* get __pa constant */ 42 LOADBASE(r3, last_task_used_math)
47 addis r3,r6,last_task_used_math@ha 43 toreal(r3)
48 lwz r4,last_task_used_math@l(r3) 44 LDL r4,OFF(last_task_used_math)(r3)
49 cmpwi 0,r4,0 45 CMPI 0,r4,0
50 beq 1f 46 beq 1f
51 add r4,r4,r6 47 toreal(r4)
52 addi r4,r4,THREAD /* want last_task_used_math->thread */ 48 addi r4,r4,THREAD /* want last_task_used_math->thread */
53 SAVE_32FPRS(0, r4) 49 SAVE_32FPRS(0, r4)
54 mffs fr0 50 mffs fr0
55 stfd fr0,THREAD_FPSCR-4(r4) 51 stfd fr0,THREAD_FPSCR(r4)
56 lwz r5,PT_REGS(r4) 52 LDL r5,PT_REGS(r4)
57 add r5,r5,r6 53 toreal(r5)
58 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 54 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
59 li r10,MSR_FP|MSR_FE0|MSR_FE1 55 li r10,MSR_FP|MSR_FE0|MSR_FE1
60 andc r4,r4,r10 /* disable FP for previous task */ 56 andc r4,r4,r10 /* disable FP for previous task */
61 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 57 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
621: 581:
63#endif /* CONFIG_SMP */ 59#endif /* CONFIG_SMP */
64 /* enable use of FP after return */ 60 /* enable use of FP after return */
61#ifdef CONFIG_PPC32
65 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ 62 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
66 lwz r4,THREAD_FPEXC_MODE(r5) 63 lwz r4,THREAD_FPEXC_MODE(r5)
67 ori r9,r9,MSR_FP /* enable FP for current */ 64 ori r9,r9,MSR_FP /* enable FP for current */
68 or r9,r9,r4 65 or r9,r9,r4
69 lfd fr0,THREAD_FPSCR-4(r5) 66#else
67 ld r4,PACACURRENT(r13)
68 addi r5,r4,THREAD /* Get THREAD */
69 ld r4,THREAD_FPEXC_MODE(r5)
70 ori r12,r12,MSR_FP
71 or r12,r12,r4
72 std r12,_MSR(r1)
73#endif
74 lfd fr0,THREAD_FPSCR(r5)
70 mtfsf 0xff,fr0 75 mtfsf 0xff,fr0
71 REST_32FPRS(0, r5) 76 REST_32FPRS(0, r5)
72#ifndef CONFIG_SMP 77#ifndef CONFIG_SMP
73 subi r4,r5,THREAD 78 subi r4,r5,THREAD
74 sub r4,r4,r6 79 fromreal(r4)
75 stw r4,last_task_used_math@l(r3) 80 STL r4,OFF(last_task_used_math)(r3)
76#endif /* CONFIG_SMP */ 81#endif /* CONFIG_SMP */
77 /* restore registers and return */ 82 /* restore registers and return */
78 /* we haven't used ctr or xer or lr */ 83 /* we haven't used ctr or xer or lr */
79 b fast_exception_return 84 b fast_exception_return
80 85
81/* 86/*
82 * FP unavailable trap from kernel - print a message, but let
83 * the task use FP in the kernel until it returns to user mode.
84 */
85 .globl KernelFP
86KernelFP:
87 lwz r3,_MSR(r1)
88 ori r3,r3,MSR_FP
89 stw r3,_MSR(r1) /* enable use of FP after return */
90 lis r3,86f@h
91 ori r3,r3,86f@l
92 mr r4,r2 /* current */
93 lwz r5,_NIP(r1)
94 bl printk
95 b ret_from_except
9686: .string "floating point used in kernel (task=%p, pc=%x)\n"
97 .align 4,0
98
99/*
100 * giveup_fpu(tsk) 87 * giveup_fpu(tsk)
101 * Disable FP for the task given as the argument, 88 * Disable FP for the task given as the argument,
102 * and save the floating-point registers in its thread_struct. 89 * and save the floating-point registers in its thread_struct.
103 * Enables the FPU for use in the kernel on return. 90 * Enables the FPU for use in the kernel on return.
104 */ 91 */
105 .globl giveup_fpu 92_GLOBAL(giveup_fpu)
106giveup_fpu:
107 mfmsr r5 93 mfmsr r5
108 ori r5,r5,MSR_FP 94 ori r5,r5,MSR_FP
109 SYNC_601 95 SYNC_601
@@ -111,23 +97,48 @@ giveup_fpu:
111 MTMSRD(r5) /* enable use of fpu now */ 97 MTMSRD(r5) /* enable use of fpu now */
112 SYNC_601 98 SYNC_601
113 isync 99 isync
114 cmpwi 0,r3,0 100 CMPI 0,r3,0
115 beqlr- /* if no previous owner, done */ 101 beqlr- /* if no previous owner, done */
116 addi r3,r3,THREAD /* want THREAD of task */ 102 addi r3,r3,THREAD /* want THREAD of task */
117 lwz r5,PT_REGS(r3) 103 LDL r5,PT_REGS(r3)
118 cmpwi 0,r5,0 104 CMPI 0,r5,0
119 SAVE_32FPRS(0, r3) 105 SAVE_32FPRS(0, r3)
120 mffs fr0 106 mffs fr0
121 stfd fr0,THREAD_FPSCR-4(r3) 107 stfd fr0,THREAD_FPSCR(r3)
122 beq 1f 108 beq 1f
123 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 109 LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
124 li r3,MSR_FP|MSR_FE0|MSR_FE1 110 li r3,MSR_FP|MSR_FE0|MSR_FE1
125 andc r4,r4,r3 /* disable FP for previous task */ 111 andc r4,r4,r3 /* disable FP for previous task */
126 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 112 STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1271: 1131:
128#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
129 li r5,0 115 li r5,0
130 lis r4,last_task_used_math@ha 116 LOADBASE(r4,last_task_used_math)
131 stw r5,last_task_used_math@l(r4) 117 STL r5,OFF(last_task_used_math)(r4)
132#endif /* CONFIG_SMP */ 118#endif /* CONFIG_SMP */
133 blr 119 blr
120
121/*
122 * These are used in the alignment trap handler when emulating
123 * single-precision loads and stores.
124 * We restore and save the fpscr so the task gets the same result
125 * and exceptions as if the cpu had performed the load or store.
126 */
127
128_GLOBAL(cvt_fd)
129 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
130 mtfsf 0xff,0
131 lfs 0,0(r3)
132 stfd 0,0(r4)
133 mffs 0
134 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
135 blr
136
137_GLOBAL(cvt_df)
138 lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */
139 mtfsf 0xff,0
140 lfd 0,0(r3)
141 stfs 0,0(r4)
142 mffs 0
143 stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */
144 blr
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
new file mode 100644
index 000000000000..b102e3a2415e
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.S
@@ -0,0 +1,1381 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
6 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
7 * Adapted for Power Macintosh by Paul Mackerras.
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/cache.h>
32#include <asm/thread_info.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35
36#ifdef CONFIG_APUS
37#include <asm/amigappc.h>
38#endif
39
40/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
41#define LOAD_BAT(n, reg, RA, RB) \
42 /* see the comment for clear_bats() -- Cort */ \
43 li RA,0; \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_DBAT##n##U,RA; \
46 lwz RA,(n*16)+0(reg); \
47 lwz RB,(n*16)+4(reg); \
48 mtspr SPRN_IBAT##n##U,RA; \
49 mtspr SPRN_IBAT##n##L,RB; \
50 beq 1f; \
51 lwz RA,(n*16)+8(reg); \
52 lwz RB,(n*16)+12(reg); \
53 mtspr SPRN_DBAT##n##U,RA; \
54 mtspr SPRN_DBAT##n##L,RB; \
551:
56
57 .text
58 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
59 .stabs "head_32.S",N_SO,0,0,0f
600:
61 .globl _stext
62_stext:
63
64/*
65 * _start is defined this way because the XCOFF loader in the OpenFirmware
66 * on the powermac expects the entry point to be a procedure descriptor.
67 */
68 .text
69 .globl _start
70_start:
71 /*
72 * These are here for legacy reasons, the kernel used to
73 * need to look like a coff function entry for the pmac
74 * but we're always started by some kind of bootloader now.
75 * -- Cort
76 */
77 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
78 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
79 nop
80
81/* PMAC
82 * Enter here with the kernel text, data and bss loaded starting at
83 * 0, running with virtual == physical mapping.
84 * r5 points to the prom entry point (the client interface handler
85 * address). Address translation is turned on, with the prom
86 * managing the hash table. Interrupts are disabled. The stack
87 * pointer (r1) points to just below the end of the half-meg region
88 * from 0x380000 - 0x400000, which is mapped in already.
89 *
90 * If we are booted from MacOS via BootX, we enter with the kernel
91 * image loaded somewhere, and the following values in registers:
92 * r3: 'BooX' (0x426f6f58)
93 * r4: virtual address of boot_infos_t
94 * r5: 0
95 *
96 * APUS
97 * r3: 'APUS'
98 * r4: physical address of memory base
99 * Linux/m68k style BootInfo structure at &_end.
100 *
101 * PREP
102 * This is jumped to on prep systems right after the kernel is relocated
103 * to its proper place in memory by the boot loader. The expected layout
104 * of the regs is:
105 * r3: ptr to residual data
106 * r4: initrd_start or if no initrd then 0
107 * r5: initrd_end - unused if r4 is 0
108 * r6: Start of command line string
109 * r7: End of command line string
110 *
111 * This just gets a minimal mmu environment setup so we can call
112 * start_here() to do the real work.
113 * -- Cort
114 */
115
116 .globl __start
117__start:
118/*
119 * We have to do any OF calls before we map ourselves to KERNELBASE,
120 * because OF may have I/O devices mapped into that area
121 * (particularly on CHRP).
122 */
123 cmpwi 0,r5,0
124 beq 1f
125 bl prom_init
126 trap
127
1281: mr r31,r3 /* save parameters */
129 mr r30,r4
130 li r24,0 /* cpu # */
131
132/*
133 * early_init() does the early machine identification and does
134 * the necessary low-level setup and clears the BSS
135 * -- Cort <cort@fsmlabs.com>
136 */
137 bl early_init
138
139#ifdef CONFIG_APUS
140/* On APUS the __va/__pa constants need to be set to the correct
141 * values before continuing.
142 */
143 mr r4,r30
144 bl fix_mem_constants
145#endif /* CONFIG_APUS */
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156
157/*
158 * Call setup_cpu for CPU 0 and initialize 6xx Idle
159 */
160 bl reloc_offset
161 li r24,0 /* cpu# */
162 bl call_setup_cpu /* Call setup_cpu for this CPU */
163#ifdef CONFIG_6xx
164 bl reloc_offset
165 bl init_idle_6xx
166#endif /* CONFIG_6xx */
167
168
169#ifndef CONFIG_APUS
170/*
171 * We need to run with _start at physical address 0.
172 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
173 * the exception vectors at 0 (and therefore this copy
174 * overwrites OF's exception vectors with our own).
175 * The MMU is off at this point.
176 */
177 bl reloc_offset
178 mr r26,r3
179 addis r4,r3,KERNELBASE@h /* current address of _start */
180 cmpwi 0,r4,0 /* are we already running at 0? */
181 bne relocate_kernel
182#endif /* CONFIG_APUS */
183/*
184 * we now have the 1st 16M of ram mapped with the bats.
185 * prep needs the mmu to be turned on here, but pmac already has it on.
186 * this shouldn't bother the pmac since it just gets turned on again
187 * as we jump to our code at KERNELBASE. -- Cort
188 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
189 * off, and in other cases, we now turn it off before changing BATs above.
190 */
191turn_on_mmu:
192 mfmsr r0
193 ori r0,r0,MSR_DR|MSR_IR
194 mtspr SPRN_SRR1,r0
195 lis r0,start_here@h
196 ori r0,r0,start_here@l
197 mtspr SPRN_SRR0,r0
198 SYNC
199 RFI /* enables MMU */
200
201/*
202 * We need __secondary_hold as a place to hold the other cpus on
203 * an SMP machine, even when we are running a UP kernel.
204 */
205 . = 0xc0 /* for prep bootloader */
206 li r3,1 /* MTX only has 1 cpu */
207 .globl __secondary_hold
208__secondary_hold:
209 /* tell the master we're here */
210 stw r3,__secondary_hold_acknowledge@l(0)
211#ifdef CONFIG_SMP
212100: lwz r4,0(0)
213 /* wait until we're told to start */
214 cmpw 0,r4,r3
215 bne 100b
216 /* our cpu # was at addr 0 - go */
217 mr r24,r3 /* cpu # */
218 b __secondary_start
219#else
220 b .
221#endif /* CONFIG_SMP */
222
223 .globl __secondary_hold_spinloop
224__secondary_hold_spinloop:
225 .long 0
226 .globl __secondary_hold_acknowledge
227__secondary_hold_acknowledge:
228 .long -1
229
230/*
231 * Exception entry code. This code runs with address translation
232 * turned off, i.e. using physical addresses.
233 * We assume sprg3 has the physical address of the current
234 * task's thread_struct.
235 */
236#define EXCEPTION_PROLOG \
237 mtspr SPRN_SPRG0,r10; \
238 mtspr SPRN_SPRG1,r11; \
239 mfcr r10; \
240 EXCEPTION_PROLOG_1; \
241 EXCEPTION_PROLOG_2
242
243#define EXCEPTION_PROLOG_1 \
244 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
245 andi. r11,r11,MSR_PR; \
246 tophys(r11,r1); /* use tophys(r1) if kernel */ \
247 beq 1f; \
248 mfspr r11,SPRN_SPRG3; \
249 lwz r11,THREAD_INFO-THREAD(r11); \
250 addi r11,r11,THREAD_SIZE; \
251 tophys(r11,r11); \
2521: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
253
254
255#define EXCEPTION_PROLOG_2 \
256 CLR_TOP32(r11); \
257 stw r10,_CCR(r11); /* save registers */ \
258 stw r12,GPR12(r11); \
259 stw r9,GPR9(r11); \
260 mfspr r10,SPRN_SPRG0; \
261 stw r10,GPR10(r11); \
262 mfspr r12,SPRN_SPRG1; \
263 stw r12,GPR11(r11); \
264 mflr r10; \
265 stw r10,_LINK(r11); \
266 mfspr r12,SPRN_SRR0; \
267 mfspr r9,SPRN_SRR1; \
268 stw r1,GPR1(r11); \
269 stw r1,0(r11); \
270 tovirt(r1,r11); /* set new kernel sp */ \
271 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
272 MTMSRD(r10); /* (except for mach check in rtas) */ \
273 stw r0,GPR0(r11); \
274 lis r10,0x7265; /* put exception frame marker */ \
275 addi r10,r10,0x6773; \
276 stw r10,8(r11); \
277 SAVE_4GPRS(3, r11); \
278 SAVE_2GPRS(7, r11)
279
280/*
281 * Note: code which follows this uses cr0.eq (set if from kernel),
282 * r11, r12 (SRR0), and r9 (SRR1).
283 *
284 * Note2: once we have set r1 we are in a position to take exceptions
285 * again, and we could thus set MSR:RI at that point.
286 */
287
/*
 * Exception vectors.
 *
 * EXCEPTION(n, label, hdlr, xfer) lays down a standard exception entry
 * at vector address n: save state with EXCEPTION_PROLOG, point r3 at
 * the exception frame on the kernel stack, then hand off to the C
 * handler hdlr via one of the EXC_XFER_* transfer macros below.
 */
#define EXCEPTION(n, label, hdlr, xfer)		\
	. = n;					\
label:						\
	EXCEPTION_PROLOG;			\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

/*
 * Common transfer code: record the trap number in the exception frame
 * (r11 points at it after EXCEPTION_PROLOG), build the MSR the handler
 * will run with (copyee optionally copies the interrupted context's EE
 * bit out of SRR1, still in r9), then branch to the transfer routine.
 * The two .long words following the bl, at the generated label i##n,
 * hold the handler and return addresses; NOTE(review): the transfer
 * routine presumably fetches them through its link register -- confirm
 * against transfer_to_handler.  The i##n labels are also referenced by
 * intercept_table and mol_trampoline below.
 */
#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	li	r10,MSR_KERNEL;					\
	copyee(r10, r9);					\
	bl	tfer;						\
i##n:								\
	.long	hdlr;						\
	.long	ret

/* COPY_EE inserts bit 16 (the EE bit) of s into d; NOCOPY leaves d as-is. */
#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

/* Full register save; handler runs with external interrupts disabled. */
#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

/* Lightweight save (trap number n+1 marks the lite path); EE disabled. */
#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

/* Full save; handler inherits the interrupted context's EE setting. */
#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

/* Lightweight save; EE copied from the interrupted context. */
#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
	. = 0x100
	b	__secondary_start_gemini
#else
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
#endif

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2.  The machine check handler checks SPRG2 and uses its
 * value if it is non-zero.  If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 * -- paulus.
 */
	. = 0x200
	mtspr	SPRN_SPRG0,r10
	mtspr	SPRN_SPRG1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG2
	cmpwi	0,r11,0
	bne	7f			/* non-zero SPRG2 => inside RTAS */
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG2
	cmpwi	cr1,r4,0
	bne	cr1,1f			/* fault was in RTAS: special path */
#endif
	EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	stw	r10,_DSISR(r11)
	mr	r5,r10
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)


/* Instruction access exception. */
	. = 0x400
InstructionAccess:
	EXCEPTION_PROLOG
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
FPUnavailable:
	EXCEPTION_PROLOG
	bne	load_up_fpu		/* if from user, just load it up */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	b	Trap_0f

	. = 0xf20
	b	AltiVecUnavailable

Trap_0f:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0xf00, unknown_exception)
462
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 * (603 TLB-miss exceptions run with MSR[TGPR] set, so r0-r3 here are
 * the temporary GPRs, not the interrupted context's registers.)
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS		/* faulting virtual address */
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3		/* current thread_struct (phys) */
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid /* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1		/* real page address for tlbli */
	mfspr	r3,SPRN_IMISS
	tlbli	r3			/* load the ITLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
/*
 * Miss could not be satisfied from the page tables: synthesize SRR1/DSISR/
 * DAR state, leave TGPR mode and fall into the full InstructionAccess
 * handler above.
 */
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess
537
/*
 * Handle TLB miss for DATA Load operation on 603/603e
 * (runs in TGPR mode; structure parallels InstructionTLBMiss above,
 * using DMISS/tlbld instead of IMISS/tlbli).
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS		/* faulting data address */
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r3,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r3,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r3,r3,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe14		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3			/* load the DTLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
/*
 * Data miss could not be satisfied: synthesize DSISR/SRR1/DAR, leave
 * TGPR mode and fall into the full DataAccess handler above.
 */
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	mtctr	r0		/* Restore CTR */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess
609
/*
 * Handle TLB miss for DATA Store on 603/603e
 * (as DataLoadTLBMiss, but the access test also requires _PAGE_RW and
 * the PTE is marked dirty as well as accessed).
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	stored ctr
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	mfctr	r0
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r3,r1
	mfspr	r2,SPRN_SPRG3
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	blt+	112f
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
	mfspr	r1,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwinm	r1,r1,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r3,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r3		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r3,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r3,r3,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe15		/* clear out reserved bits and M */
	andc	r1,r3,r1		/* PP = user? 2: 0 */
	mtspr	SPRN_RPA,r1
	mfspr	r3,SPRN_DMISS
	tlbld	r3			/* load the DTLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
659
/*
 * Without AltiVec support, route AltiVec assist traps (0x1600) to the
 * generic unknown-exception handler.
 */
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)

	/* Export the hook point Mac-on-Linux patches at the 0x2f00 vector;
	 * i0x2f00 is the label EXC_XFER_TEMPLATE generates in that entry. */
	.globl mol_trampoline
	.set mol_trampoline, i0x2f00

	. = 0x3000

/*
 * AltiVec-unavailable trap, remapped here from 0xf20 (see the comment
 * at the 0xf00 vector).  From user mode just load up the unit; from the
 * kernel (or without CONFIG_ALTIVEC) raise the C exception.
 */
AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	bne	load_up_altivec		/* if from user, just load it up */
#endif /* CONFIG_ALTIVEC */
	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support.  Changes to one are likely to be applicable to the
 * other!  */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch.  -- Kumar
 */
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
	tophys(r6,0)			/* r6 = virt-to-phys offset */
	addis	r3,r6,last_task_used_altivec@ha
	lwz	r4,last_task_used_altivec@l(r3)
	cmpwi	0,r4,0
	beq	1f			/* no previous owner: nothing to save */
	add	r4,r4,r6
	addi	r4,r4,THREAD	/* want THREAD of last_task_used_altivec */
	SAVE_32VRS(0,r10,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	lwz	r5,PT_REGS(r4)
	add	r5,r5,r6
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10	/* disable altivec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	/* enable use of AltiVec after return */
	oris	r9,r9,MSR_VEC@h
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r10,r5)
#ifndef CONFIG_SMP
	subi	r4,r5,THREAD		/* back to the task struct (phys) */
	sub	r4,r4,r6		/* ... as a virtual address */
	stw	r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	/* we haven't used ctr or xer or lr */
	b	fast_exception_return

/*
 * AltiVec unavailable trap from kernel - print a message, but let
 * the task use AltiVec in the kernel until it returns to user mode.
 */
KernelAltiVec:
	lwz	r3,_MSR(r1)
	oris	r3,r3,MSR_VEC@h
	stw	r3,_MSR(r1)	/* enable use of AltiVec after return */
	lis	r3,87f@h
	ori	r3,r3,87f@l
	mr	r4,r2		/* current */
	lwz	r5,_NIP(r1)
	bl	printk
	b	ret_from_except
87:	.string	"AltiVec used in kernel (task=%p, pc=%x) \n"
	.align	4,0

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */

	.globl	giveup_altivec
giveup_altivec:
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	cmpwi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	lwz	r5,PT_REGS(r3)
	cmpwi	0,r5,0			/* cr0 consumed by the beq below */
	SAVE_32VRS(0, r4, r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f			/* no pt_regs: skip MSR update */
	lwz	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable AltiVec for previous task */
	stw	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	lis	r4,last_task_used_altivec@ha
	stw	r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
	blr
#endif /* CONFIG_ALTIVEC */
818
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address 0.
 * NOTE(review): relies on the caller having left the source address in
 * r4 and the link offset in r26 (both set before this point in the
 * file) -- confirm against the early startup code.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h	/* r25 = physical size of the image */
	li	r3,0			/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4	/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_APUS
/*
 * On APUS the physical base address of the kernel is not known at compile
 * time, which means the __pa/__va constants used are incorrect. In the
 * __init section is recorded the virtual addresses of instructions using
 * these constants, so all that has to be done is fix these before
 * continuing the kernel boot.
 *
 * r4 = The physical address of the kernel base.
 */
fix_mem_constants:
	mr	r10,r4
	addis	r10,r10,-KERNELBASE@h	/* virt_to_phys constant */
	neg	r11,r10			/* phys_to_virt constant */

	lis	r12,__vtop_table_begin@h
	ori	r12,r12,__vtop_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__vtop_table_end@h
	ori	r13,r13,__vtop_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r10,16,16,31	/* half of vp const in low half */
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b
	sync				/* additional sync needed on g4 */
	isync

/*
 * Map the memory where the exception handlers will
 * be copied to when hash constants have been patched.
 */
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r8,0xfff0
#else
	lis	r8,0
#endif
	ori	r8,r8,0x2		/* 128KB, supervisor */
	mtspr	SPRN_DBAT3U,r8
	mtspr	SPRN_DBAT3L,r8

	lis	r12,__ptov_table_begin@h
	ori	r12,r12,__ptov_table_begin@l
	add	r12,r12,r10		/* table begin phys address */
	lis	r13,__ptov_table_end@h
	ori	r13,r13,__ptov_table_end@l
	add	r13,r13,r10		/* table end phys address */
	subi	r12,r12,4
	subi	r13,r13,4
1:	lwzu	r14,4(r12)		/* virt address of instruction */
	add	r14,r14,r10		/* phys address of instruction */
	lwz	r15,0(r14)		/* instruction, now insert top */
	rlwimi	r15,r11,16,16,31	/* half of pv const in low half*/
	stw	r15,0(r14)		/* of instruction and restore. */
	dcbst	r0,r14			/* write it to memory */
	sync
	icbi	r0,r14			/* flush the icache line */
	cmpw	r12,r13
	bne	1b

	sync				/* additional sync needed on g4 */
	isync				/* No speculative loading until now */
	blr

/***********************************************************************
 *  Please note that on APUS the exception handlers are located at the
 *  physical address 0xfff0000. For this reason, the exception handlers
 *  cannot use relative branches to access the code below.
 ***********************************************************************/
#endif /* CONFIG_APUS */
942
#ifdef CONFIG_SMP
#ifdef CONFIG_GEMINI
	.globl	__secondary_start_gemini
__secondary_start_gemini:
	/* Invalidate and disable the icache, then join the common path. */
	mfspr	r4,SPRN_HID0
	ori	r4,r4,HID0_ICFI
	li	r3,0
	ori	r3,r3,HID0_ICE
	andc	r4,r4,r3
	mtspr	SPRN_HID0,r4
	sync
	b	__secondary_start
#endif /* CONFIG_GEMINI */

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0		/* r24 = this cpu's number */
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)		/* zero the frame back-chain */

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
#if !defined(CONFIG_6xx)
_GLOBAL(__save_cpu_setup)
	blr
_GLOBAL(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) */


/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.  Loads SDR1, the 16 segment registers for
 * context 0, and the BAT array set up by MMU_init.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b

/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)

	blr
1074
/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => not in RTAS */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 * NOTE(review): r31/r30 presumably carry boot parameters saved by the
 * entry code earlier in this file -- confirm against the startup path.
 */
	mr	r3,r31
	mr	r4,r30
	bl	machine_init
	bl	MMU_init

#ifdef CONFIG_APUS
	/* Copy exception code to exception vector base on APUS. */
	lis	r4,KERNELBASE@h
#ifdef CONFIG_APUS_FAST_EXCEPT
	lis	r3,0xfff0		/* Copy to 0xfff00000 */
#else
	lis	r3,0			/* Copy to 0x00000000 */
#endif
	li	r5,0x4000		/* # bytes of memory to copy */
	li	r6,0
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
#endif /* CONFIG_APUS */

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI

/*
 * Set up the segment registers for a new context.
 * r3 = context number, r4 = PGDIR pointer (only used for the BDI hook).
 */
_GLOBAL(set_context)
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
1188
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f			/* 601: skip the DBATs */

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup have
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
	blr

/*
 * Invalidate TLB entries covering the first 4MB of address space.
 * NOTE(review): with `blt 1b` the loop body runs only while the
 * decremented count is negative, so starting from 0x400000 it exits
 * after a single tlbie; later head_32.S uses `bgt 1b` here -- confirm
 * which is intended before touching this.
 */
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	blt	1b
	sync
	blr

/*
 * Turn address translation off; on return execution continues
 * (unmapped) at __after_mmu_off.  r3 = physical run base on entry.
 * No-op if the MMU is already off.
 */
mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/*
 * Use the first pair of BAT registers to map the 1st 16MB
 * of RAM to KERNELBASE.  From this point on we can't safely
 * call OF any more.
 */
initial_bats:
	lis	r11,KERNELBASE@h
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	mtspr	SPRN_IBAT1U,r9
	mtspr	SPRN_IBAT1L,r10
	isync
	blr

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
#ifdef CONFIG_APUS
	ori	r11,r11,BL_8M<<2|0x2	/* set up 8MB BAT registers for 604 */
#else
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */
#endif /* CONFIG_APUS */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
1306
1307
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	/* Disable the instruction and data caches. */
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	/* rfi to 2: below at its physical address, MMU off. */
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4		/* jump to the caller-supplied start address */
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Table of the i0xNNN labels generated by the EXC_XFER_* macros, one
 * word per vector; zero where no intercept entry exists. */
	.globl	intercept_table
intercept_table:
	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
new file mode 100644
index 000000000000..8b49679fad54
--- /dev/null
+++ b/arch/powerpc/kernel/head_44x.S
@@ -0,0 +1,782 @@
1/*
2 * arch/ppc/kernel/head_44x.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2005 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 *
27 * This program is free software; you can redistribute it and/or modify it
28 * under the terms of the GNU General Public License as published by the
29 * Free Software Foundation; either version 2 of the License, or (at your
30 * option) any later version.
31 */
32
33#include <linux/config.h>
34#include <asm/processor.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/ibm4xx.h>
39#include <asm/ibm44x.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44#include "head_booke.h"
45
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/*
77 * Set up the initial MMU state
78 *
79 * We are still executing code at the virtual address
80 * mappings set by the firmware for the base of RAM.
81 *
82 * We first invalidate all TLB entries but the one
83 * we are running from. We then load the KERNELBASE
84 * mappings so we can begin to use kernel addresses
85 * natively and so the interrupt vector locations are
86 * permanently pinned (necessary since Book E
87 * implementations always have translation enabled).
88 *
89 * TODO: Use the known TLB entry we are running from to
90 * determine which physical region we are located
91 * in. This can be used to determine where in RAM
92 * (on a shared CPU system) or PCI memory space
93 * (on a DRAMless system) we are located.
94 * For now, we assume a perfect world which means
95 * we are located at the base of DRAM (physical 0).
96 */
97
98/*
99 * Search TLB for entry that we are currently using.
100 * Invalidate all entries but the one we are using.
101 */
102 /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
103 mfspr r3,SPRN_PID /* Get PID */
104 mfmsr r4 /* Get MSR */
105 andi. r4,r4,MSR_IS@l /* TS=1? */
106 beq wmmucr /* If not, leave STS=0 */
107 oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
108wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
109 sync
110
111 bl invstr /* Find our address */
112invstr: mflr r5 /* Make it accessible */
113 tlbsx r23,0,r5 /* Find entry we are in */
114 li r4,0 /* Start at TLB entry 0 */
115 li r3,0 /* Set PAGEID inval value */
1161: cmpw r23,r4 /* Is this our entry? */
117 beq skpinv /* If so, skip the inval */
118 tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
119skpinv: addi r4,r4,1 /* Increment */
120 cmpwi r4,64 /* Are we done? */
121 bne 1b /* If not, repeat */
122 isync /* If so, context change */
123
124/*
125 * Configure and load pinned entry into TLB slot 63.
126 */
127
128 lis r3,KERNELBASE@h /* Load the kernel virtual address */
129 ori r3,r3,KERNELBASE@l
130
131 /* Kernel is at the base of RAM */
132 li r4, 0 /* Load the kernel physical address */
133
134 /* Load the kernel PID = 0 */
135 li r0,0
136 mtspr SPRN_PID,r0
137 sync
138
139 /* Initialize MMUCR */
140 li r5,0
141 mtspr SPRN_MMUCR,r5
142 sync
143
144 /* pageid fields */
145 clrrwi r3,r3,10 /* Mask off the effective page number */
146 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
147
148 /* xlat fields */
149 clrrwi r4,r4,10 /* Mask off the real page number */
150 /* ERPN is 0 for first 4GB page */
151
152 /* attrib fields */
153 /* Added guarded bit to protect against speculative loads/stores */
154 li r5,0
155 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
156
157 li r0,63 /* TLB slot 63 */
158
159 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
160 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
161 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
162
163 /* Force context change */
164 mfmsr r0
165 mtspr SPRN_SRR1, r0
166 lis r0,3f@h
167 ori r0,r0,3f@l
168 mtspr SPRN_SRR0,r0
169 sync
170 rfi
171
172 /* If necessary, invalidate original entry we used */
1733: cmpwi r23,63
174 beq 4f
175 li r6,0
176 tlbwe r6,r23,PPC44x_TLB_PAGEID
177 isync
178
1794:
180#ifdef CONFIG_SERIAL_TEXT_DEBUG
181 /*
182 * Add temporary UART mapping for early debug.
183 * We can map UART registers wherever we want as long as they don't
184 * interfere with other system mappings (e.g. with pinned entries).
185 * For an example of how we handle this - see ocotea.h. --ebs
186 */
187 /* pageid fields */
188 lis r3,UART0_IO_BASE@h
189 ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_4K
190
191 /* xlat fields */
192 lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
193#ifndef CONFIG_440EP
194 ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */
195#endif
196
197 /* attrib fields */
198 li r5,0
199 ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_I | PPC44x_TLB_G)
200
201 li r0,0 /* TLB slot 0 */
202
203 tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
204 tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
205 tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
206
207 /* Force context change */
208 isync
209#endif /* CONFIG_SERIAL_TEXT_DEBUG */
210
211 /* Establish the interrupt vector offsets */
212 SET_IVOR(0, CriticalInput);
213 SET_IVOR(1, MachineCheck);
214 SET_IVOR(2, DataStorage);
215 SET_IVOR(3, InstructionStorage);
216 SET_IVOR(4, ExternalInput);
217 SET_IVOR(5, Alignment);
218 SET_IVOR(6, Program);
219 SET_IVOR(7, FloatingPointUnavailable);
220 SET_IVOR(8, SystemCall);
221 SET_IVOR(9, AuxillaryProcessorUnavailable);
222 SET_IVOR(10, Decrementer);
223 SET_IVOR(11, FixedIntervalTimer);
224 SET_IVOR(12, WatchdogTimer);
225 SET_IVOR(13, DataTLBError);
226 SET_IVOR(14, InstructionTLBError);
227 SET_IVOR(15, Debug);
228
229 /* Establish the interrupt vector base */
230 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
231 mtspr SPRN_IVPR,r4
232
233#ifdef CONFIG_440EP
234 /* Clear DAPUIB flag in CCR0 (enable APU between CPU and FPU) */
235 mfspr r2,SPRN_CCR0
236 lis r3,0xffef
237 ori r3,r3,0xffff
238 and r2,r2,r3
239 mtspr SPRN_CCR0,r2
240 isync
241#endif
242
243 /*
244 * This is where the main kernel code starts.
245 */
246
247 /* ptr to current */
248 lis r2,init_task@h
249 ori r2,r2,init_task@l
250
251 /* ptr to current thread */
252 addi r4,r2,THREAD /* init task's THREAD */
253 mtspr SPRN_SPRG3,r4
254
255 /* stack */
256 lis r1,init_thread_union@h
257 ori r1,r1,init_thread_union@l
258 li r0,0
259 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
260
261 bl early_init
262
263/*
264 * Decide what sort of machine this is and initialize the MMU.
265 */
266 mr r3,r31
267 mr r4,r30
268 mr r5,r29
269 mr r6,r28
270 mr r7,r27
271 bl machine_init
272 bl MMU_init
273
274 /* Setup PTE pointers for the Abatron bdiGDB */
275 lis r6, swapper_pg_dir@h
276 ori r6, r6, swapper_pg_dir@l
277 lis r5, abatron_pteptrs@h
278 ori r5, r5, abatron_pteptrs@l
279 lis r4, KERNELBASE@h
280 ori r4, r4, KERNELBASE@l
281 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
282 stw r6, 0(r5)
283
284 /* Let's move on */
285 lis r4,start_kernel@h
286 ori r4,r4,start_kernel@l
287 lis r3,MSR_KERNEL@h
288 ori r3,r3,MSR_KERNEL@l
289 mtspr SPRN_SRR0,r4
290 mtspr SPRN_SRR1,r3
291 rfi /* change context and jump to start_kernel */
292
293/*
294 * Interrupt vector entry code
295 *
296 * The Book E MMUs are always on so we don't need to handle
297 * interrupts in real mode as with previous PPC processors. In
298 * this case we handle interrupts in the kernel virtual address
299 * space.
300 *
301 * Interrupt vectors are dynamically placed relative to the
302 * interrupt prefix as determined by the address of interrupt_base.
303 * The interrupt vectors offsets are programmed using the labels
304 * for each interrupt vector entry.
305 *
306 * Interrupt vectors must be aligned on a 16 byte boundary.
307 * We align on a 32 byte cache line boundary for good measure.
308 */
309
310interrupt_base:
311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313
314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif
320
321 /* Data Storage Interrupt */
322 START_EXCEPTION(DataStorage)
323 mtspr SPRN_SPRG0, r10 /* Save some working registers */
324 mtspr SPRN_SPRG1, r11
325 mtspr SPRN_SPRG4W, r12
326 mtspr SPRN_SPRG5W, r13
327 mfcr r11
328 mtspr SPRN_SPRG7W, r11
329
330 /*
331 * Check if it was a store fault, if not then bail
332 * because a user tried to access a kernel or
333 * read-protected page. Otherwise, get the
334 * offending address and handle it.
335 */
336 mfspr r10, SPRN_ESR
337 andis. r10, r10, ESR_ST@h
338 beq 2f
339
340 mfspr r10, SPRN_DEAR /* Get faulting address */
341
342 /* If we are faulting a kernel address, we have to use the
343 * kernel page tables.
344 */
345 lis r11, TASK_SIZE@h
346 cmplw r10, r11
347 blt+ 3f
348 lis r11, swapper_pg_dir@h
349 ori r11, r11, swapper_pg_dir@l
350
351 mfspr r12,SPRN_MMUCR
352 rlwinm r12,r12,0,0,23 /* Clear TID */
353
354 b 4f
355
356 /* Get the PGD for the current thread */
3573:
358 mfspr r11,SPRN_SPRG3
359 lwz r11,PGDIR(r11)
360
361 /* Load PID into MMUCR TID */
362 mfspr r12,SPRN_MMUCR /* Get MMUCR */
363 mfspr r13,SPRN_PID /* Get PID */
364 rlwimi r12,r13,0,24,31 /* Set TID */
365
3664:
367 mtspr SPRN_MMUCR,r12
368
369 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
370 lwzx r11, r12, r11 /* Get pgd/pmd entry */
371 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
372 beq 2f /* Bail if no table */
373
374 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
375 lwz r11, 4(r12) /* Get pte entry */
376
377 andi. r13, r11, _PAGE_RW /* Is it writeable? */
378 beq 2f /* Bail if not */
379
380 /* Update 'changed'.
381 */
382 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
383 stw r11, 4(r12) /* Update Linux page table */
384
385 li r13, PPC44x_TLB_SR@l /* Set SR */
386 rlwimi r13, r11, 29, 29, 29 /* SX = _PAGE_HWEXEC */
387 rlwimi r13, r11, 0, 30, 30 /* SW = _PAGE_RW */
388 rlwimi r13, r11, 29, 28, 28 /* UR = _PAGE_USER */
389 rlwimi r12, r11, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
390 rlwimi r12, r11, 29, 30, 30 /* (_PAGE_USER>>3)->r12 */
391 and r12, r12, r11 /* HWEXEC/RW & USER */
392 rlwimi r13, r12, 0, 26, 26 /* UX = HWEXEC & USER */
393 rlwimi r13, r12, 3, 27, 27 /* UW = RW & USER */
394
395 rlwimi r11,r13,0,26,31 /* Insert static perms */
396
397 rlwinm r11,r11,0,20,15 /* Clear U0-U3 */
398
399 /* find the TLB index that caused the fault. It has to be here. */
400 tlbsx r10, 0, r10
401
402 tlbwe r11, r10, PPC44x_TLB_ATTRIB /* Write ATTRIB */
403
404 /* Done...restore registers and get out of here.
405 */
406 mfspr r11, SPRN_SPRG7R
407 mtcr r11
408 mfspr r13, SPRN_SPRG5R
409 mfspr r12, SPRN_SPRG4R
410
411 mfspr r11, SPRN_SPRG1
412 mfspr r10, SPRN_SPRG0
413 rfi /* Force context change */
414
4152:
416 /*
417 * The bailout. Restore registers to pre-exception conditions
418 * and call the heavyweights to help us out.
419 */
420 mfspr r11, SPRN_SPRG7R
421 mtcr r11
422 mfspr r13, SPRN_SPRG5R
423 mfspr r12, SPRN_SPRG4R
424
425 mfspr r11, SPRN_SPRG1
426 mfspr r10, SPRN_SPRG0
427 b data_access
428
429 /* Instruction Storage Interrupt */
430 INSTRUCTION_STORAGE_EXCEPTION
431
432 /* External Input Interrupt */
433 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
434
435 /* Alignment Interrupt */
436 ALIGNMENT_EXCEPTION
437
438 /* Program Interrupt */
439 PROGRAM_EXCEPTION
440
441 /* Floating Point Unavailable Interrupt */
442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION
444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif
447
448 /* System Call Interrupt */
449 START_EXCEPTION(SystemCall)
450 NORMAL_EXCEPTION_PROLOG
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452
453 /* Auxillary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455
456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION
458
459 /* Fixed Internal Timer Interrupt */
460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462
463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif
470
471 /* Data TLB Error Interrupt */
472 START_EXCEPTION(DataTLBError)
473 mtspr SPRN_SPRG0, r10 /* Save some working registers */
474 mtspr SPRN_SPRG1, r11
475 mtspr SPRN_SPRG4W, r12
476 mtspr SPRN_SPRG5W, r13
477 mfcr r11
478 mtspr SPRN_SPRG7W, r11
479 mfspr r10, SPRN_DEAR /* Get faulting address */
480
481 /* If we are faulting a kernel address, we have to use the
482 * kernel page tables.
483 */
484 lis r11, TASK_SIZE@h
485 cmplw r10, r11
486 blt+ 3f
487 lis r11, swapper_pg_dir@h
488 ori r11, r11, swapper_pg_dir@l
489
490 mfspr r12,SPRN_MMUCR
491 rlwinm r12,r12,0,0,23 /* Clear TID */
492
493 b 4f
494
495 /* Get the PGD for the current thread */
4963:
497 mfspr r11,SPRN_SPRG3
498 lwz r11,PGDIR(r11)
499
500 /* Load PID into MMUCR TID */
501 mfspr r12,SPRN_MMUCR
502 mfspr r13,SPRN_PID /* Get PID */
503 rlwimi r12,r13,0,24,31 /* Set TID */
504
5054:
506 mtspr SPRN_MMUCR,r12
507
508 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
509 lwzx r11, r12, r11 /* Get pgd/pmd entry */
510 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
511 beq 2f /* Bail if no table */
512
513 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
514 lwz r11, 4(r12) /* Get pte entry */
515 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
516 beq 2f /* Bail if not present */
517
518 ori r11, r11, _PAGE_ACCESSED
519 stw r11, 4(r12)
520
521 /* Jump to common tlb load */
522 b finish_tlb_load
523
5242:
525 /* The bailout. Restore registers to pre-exception conditions
526 * and call the heavyweights to help us out.
527 */
528 mfspr r11, SPRN_SPRG7R
529 mtcr r11
530 mfspr r13, SPRN_SPRG5R
531 mfspr r12, SPRN_SPRG4R
532 mfspr r11, SPRN_SPRG1
533 mfspr r10, SPRN_SPRG0
534 b data_access
535
536 /* Instruction TLB Error Interrupt */
537 /*
538 * Nearly the same as above, except we get our
539 * information from different registers and bailout
540 * to a different point.
541 */
542 START_EXCEPTION(InstructionTLBError)
543 mtspr SPRN_SPRG0, r10 /* Save some working registers */
544 mtspr SPRN_SPRG1, r11
545 mtspr SPRN_SPRG4W, r12
546 mtspr SPRN_SPRG5W, r13
547 mfcr r11
548 mtspr SPRN_SPRG7W, r11
549 mfspr r10, SPRN_SRR0 /* Get faulting address */
550
551 /* If we are faulting a kernel address, we have to use the
552 * kernel page tables.
553 */
554 lis r11, TASK_SIZE@h
555 cmplw r10, r11
556 blt+ 3f
557 lis r11, swapper_pg_dir@h
558 ori r11, r11, swapper_pg_dir@l
559
560 mfspr r12,SPRN_MMUCR
561 rlwinm r12,r12,0,0,23 /* Clear TID */
562
563 b 4f
564
565 /* Get the PGD for the current thread */
5663:
567 mfspr r11,SPRN_SPRG3
568 lwz r11,PGDIR(r11)
569
570 /* Load PID into MMUCR TID */
571 mfspr r12,SPRN_MMUCR
572 mfspr r13,SPRN_PID /* Get PID */
573 rlwimi r12,r13,0,24,31 /* Set TID */
574
5754:
576 mtspr SPRN_MMUCR,r12
577
578 rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
579 lwzx r11, r12, r11 /* Get pgd/pmd entry */
580 rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
581 beq 2f /* Bail if no table */
582
583 rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
584 lwz r11, 4(r12) /* Get pte entry */
585 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
586 beq 2f /* Bail if not present */
587
588 ori r11, r11, _PAGE_ACCESSED
589 stw r11, 4(r12)
590
591 /* Jump to common TLB load point */
592 b finish_tlb_load
593
5942:
595 /* The bailout. Restore registers to pre-exception conditions
596 * and call the heavyweights to help us out.
597 */
598 mfspr r11, SPRN_SPRG7R
599 mtcr r11
600 mfspr r13, SPRN_SPRG5R
601 mfspr r12, SPRN_SPRG4R
602 mfspr r11, SPRN_SPRG1
603 mfspr r10, SPRN_SPRG0
604 b InstructionStorage
605
606 /* Debug Interrupt */
607 DEBUG_EXCEPTION
608
609/*
610 * Local functions
611 */
612 /*
613 * Data TLB exceptions will bail out to this point
614 * if they can't resolve the lightweight TLB fault.
615 */
616data_access:
617 NORMAL_EXCEPTION_PROLOG
618 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
619 stw r5,_ESR(r11)
620 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
621 EXC_XFER_EE_LITE(0x0300, handle_page_fault)
622
623/*
624
625 * Both the instruction and data TLB miss get to this
626 * point to load the TLB.
627 * r10 - EA of fault
628 * r11 - available to use
629 * r12 - Pointer to the 64-bit PTE
630 * r13 - available to use
631 * MMUCR - loaded with proper value when we get here
632 * Upon exit, we reload everything and RFI.
633 */
634finish_tlb_load:
635 /*
636 * We set execute, because we don't have the granularity to
637 * properly set this at the page level (Linux problem).
638 * If shared is set, we cause a zero PID->TID load.
639 * Many of these bits are software only. Bits we don't set
640 * here we (properly should) assume have the appropriate value.
641 */
642
643 /* Load the next available TLB index */
644 lis r13, tlb_44x_index@ha
645 lwz r13, tlb_44x_index@l(r13)
646 /* Load the TLB high watermark */
647 lis r11, tlb_44x_hwater@ha
648 lwz r11, tlb_44x_hwater@l(r11)
649
650 /* Increment, rollover, and store TLB index */
651 addi r13, r13, 1
652 cmpw 0, r13, r11 /* reserve entries */
653 ble 7f
654 li r13, 0
6557:
656 /* Store the next available TLB index */
657 lis r11, tlb_44x_index@ha
658 stw r13, tlb_44x_index@l(r11)
659
660 lwz r11, 0(r12) /* Get MS word of PTE */
661 lwz r12, 4(r12) /* Get LS word of PTE */
662 rlwimi r11, r12, 0, 0 , 19 /* Insert RPN */
663 tlbwe r11, r13, PPC44x_TLB_XLAT /* Write XLAT */
664
665 /*
666 * Create PAGEID. This is the faulting address,
667 * page size, and valid flag.
668 */
669 li r11, PPC44x_TLB_VALID | PPC44x_TLB_4K
670 rlwimi r10, r11, 0, 20, 31 /* Insert valid and page size */
671 tlbwe r10, r13, PPC44x_TLB_PAGEID /* Write PAGEID */
672
673 li r10, PPC44x_TLB_SR@l /* Set SR */
674 rlwimi r10, r12, 0, 30, 30 /* Set SW = _PAGE_RW */
675 rlwimi r10, r12, 29, 29, 29 /* SX = _PAGE_HWEXEC */
676 rlwimi r10, r12, 29, 28, 28 /* UR = _PAGE_USER */
677 rlwimi r11, r12, 31, 26, 26 /* (_PAGE_USER>>1)->r12 */
678 and r11, r12, r11 /* HWEXEC & USER */
679 rlwimi r10, r11, 0, 26, 26 /* UX = HWEXEC & USER */
680
681 rlwimi r12, r10, 0, 26, 31 /* Insert static perms */
682 rlwinm r12, r12, 0, 20, 15 /* Clear U0-U3 */
683 tlbwe r12, r13, PPC44x_TLB_ATTRIB /* Write ATTRIB */
684
685 /* Done...restore registers and get out of here.
686 */
687 mfspr r11, SPRN_SPRG7R
688 mtcr r11
689 mfspr r13, SPRN_SPRG5R
690 mfspr r12, SPRN_SPRG4R
691 mfspr r11, SPRN_SPRG1
692 mfspr r10, SPRN_SPRG0
693 rfi /* Force context change */
694
695/*
696 * Global functions
697 */
698
699/*
700 * extern void giveup_altivec(struct task_struct *prev)
701 *
702 * The 44x core does not have an AltiVec unit.
703 */
704_GLOBAL(giveup_altivec)
705 blr
706
707/*
708 * extern void giveup_fpu(struct task_struct *prev)
709 *
710 * The 44x core does not have an FPU.
711 */
712#ifndef CONFIG_PPC_FPU
713_GLOBAL(giveup_fpu)
714 blr
715#endif
716
717/*
718 * extern void abort(void)
719 *
720 * At present, this routine just applies a system reset.
721 */
722_GLOBAL(abort)
723 mfspr r13,SPRN_DBCR0
724 oris r13,r13,DBCR0_RST_SYSTEM@h
725 mtspr SPRN_DBCR0,r13
726
727_GLOBAL(set_context)
728
729#ifdef CONFIG_BDI_SWITCH
730 /* Context switch the PTE pointer for the Abatron BDI2000.
731 * The PGDIR is the second parameter.
732 */
733 lis r5, abatron_pteptrs@h
734 ori r5, r5, abatron_pteptrs@l
735 stw r4, 0x4(r5)
736#endif
737 mtspr SPRN_PID,r3
738 isync /* Force context change */
739 blr
740
741/*
742 * We put a few things here that have to be page-aligned. This stuff
743 * goes at the beginning of the data segment, which is page-aligned.
744 */
745 .data
746 .align 12
747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
751 .space 4096
752
753/*
754 * To support >32-bit physical addresses, we use an 8KB pgdir.
755 */
756 .globl swapper_pg_dir
757swapper_pg_dir:
758 .space 8192
759
760/* Reserved 4k for the critical exception stack & 4k for the machine
761 * check stack per CPU for kernel mode exceptions */
762 .section .bss
763 .align 12
764exception_stack_bottom:
765 .space BOOKE_EXCEPTION_STACK_SIZE
766 .globl exception_stack_top
767exception_stack_top:
768
769/*
770 * This space gets a copy of optional info passed to us by the bootstrap
771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
772 */
773 .globl cmd_line
774cmd_line:
775 .space 512
776
777/*
778 * Room for two PTE pointers, usually the kernel and current user pointers
779 * to their respective root page table.
780 */
781abatron_pteptrs:
782 .space 8
diff --git a/arch/powerpc/kernel/head_4xx.S b/arch/powerpc/kernel/head_4xx.S
new file mode 100644
index 000000000000..2590e97f5539
--- /dev/null
+++ b/arch/powerpc/kernel/head_4xx.S
@@ -0,0 +1,1022 @@
1/*
2 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
3 * Initial PowerPC version.
4 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
5 * Rewritten for PReP
6 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
7 * Low-level exception handers, MMU support, and rewrite.
8 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
9 * PowerPC 8xx modifications.
10 * Copyright (c) 1998-1999 TiVo, Inc.
11 * PowerPC 403GCX modifications.
12 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
13 * PowerPC 403GCX/405GP modifications.
14 * Copyright 2000 MontaVista Software Inc.
15 * PPC405 modifications
16 * PowerPC 403GCX/405GP modifications.
17 * Author: MontaVista Software, Inc.
18 * frank_rowand@mvista.com or source@mvista.com
19 * debbie_chu@mvista.com
20 *
21 *
22 * Module name: head_4xx.S
23 *
24 * Description:
25 * Kernel execution entry point code.
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License
29 * as published by the Free Software Foundation; either version
30 * 2 of the License, or (at your option) any later version.
31 *
32 */
33
34#include <linux/config.h>
35#include <asm/processor.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/ibm4xx.h>
40#include <asm/cputable.h>
41#include <asm/thread_info.h>
42#include <asm/ppc_asm.h>
43#include <asm/asm-offsets.h>
44
45/* As with the other PowerPC ports, it is expected that when code
46 * execution begins here, the following registers contain valid, yet
47 * optional, information:
48 *
49 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
50 * r4 - Starting address of the init RAM disk
51 * r5 - Ending address of the init RAM disk
52 * r6 - Start of kernel command line string (e.g. "mem=96m")
53 * r7 - End of kernel command line string
54 *
55 * This is all going to change RSN when we add bi_recs....... -- Dan
56 */
57 .text
58_GLOBAL(_stext)
59_GLOBAL(_start)
60
61 /* Save parameters we are passed.
62 */
63 mr r31,r3
64 mr r30,r4
65 mr r29,r5
66 mr r28,r6
67 mr r27,r7
68
69 /* We have to turn on the MMU right away so we get cache modes
70 * set correctly.
71 */
72 bl initial_mmu
73
74/* We now have the lower 16 Meg mapped into TLB entries, and the caches
75 * ready to work.
76 */
77turn_on_mmu:
78 lis r0,MSR_KERNEL@h
79 ori r0,r0,MSR_KERNEL@l
80 mtspr SPRN_SRR1,r0
81 lis r0,start_here@h
82 ori r0,r0,start_here@l
83 mtspr SPRN_SRR0,r0
84 SYNC
85 rfi /* enables MMU */
86 b . /* prevent prefetch past rfi */
87
88/*
89 * This area is used for temporarily saving registers during the
90 * critical exception prolog.
91 */
92 . = 0xc0
93crit_save:
94_GLOBAL(crit_r10)
95 .space 4
96_GLOBAL(crit_r11)
97 .space 4
98
99/*
100 * Exception vector entry code. This code runs with address translation
101 * turned off (i.e. using physical addresses). We assume SPRG3 has the
102 * physical address of the current task thread_struct.
103 * Note that we have to have decremented r1 before we write to any fields
104 * of the exception frame, since a critical interrupt could occur at any
105 * time, and it will write to the area immediately below the current r1.
106 */
107#define NORMAL_EXCEPTION_PROLOG \
108 mtspr SPRN_SPRG0,r10; /* save two registers to work with */\
109 mtspr SPRN_SPRG1,r11; \
110 mtspr SPRN_SPRG2,r1; \
111 mfcr r10; /* save CR in r10 for now */\
112 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
113 andi. r11,r11,MSR_PR; \
114 beq 1f; \
115 mfspr r1,SPRN_SPRG3; /* if from user, start at top of */\
116 lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\
117 addi r1,r1,THREAD_SIZE; \
1181: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
119 tophys(r11,r1); \
120 stw r10,_CCR(r11); /* save various registers */\
121 stw r12,GPR12(r11); \
122 stw r9,GPR9(r11); \
123 mfspr r10,SPRN_SPRG0; \
124 stw r10,GPR10(r11); \
125 mfspr r12,SPRN_SPRG1; \
126 stw r12,GPR11(r11); \
127 mflr r10; \
128 stw r10,_LINK(r11); \
129 mfspr r10,SPRN_SPRG2; \
130 mfspr r12,SPRN_SRR0; \
131 stw r10,GPR1(r11); \
132 mfspr r9,SPRN_SRR1; \
133 stw r10,0(r11); \
134 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
135 stw r0,GPR0(r11); \
136 SAVE_4GPRS(3, r11); \
137 SAVE_2GPRS(7, r11)
138
139/*
140 * Exception prolog for critical exceptions. This is a little different
141 * from the normal exception prolog above since a critical exception
142 * can potentially occur at any point during normal exception processing.
143 * Thus we cannot use the same SPRG registers as the normal prolog above.
144 * Instead we use a couple of words of memory at low physical addresses.
145 * This is OK since we don't support SMP on these processors.
146 */
147#define CRITICAL_EXCEPTION_PROLOG \
148 stw r10,crit_r10@l(0); /* save two registers to work with */\
149 stw r11,crit_r11@l(0); \
150 mfcr r10; /* save CR in r10 for now */\
151 mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
152 andi. r11,r11,MSR_PR; \
153 lis r11,critical_stack_top@h; \
154 ori r11,r11,critical_stack_top@l; \
155 beq 1f; \
156 /* COMING FROM USER MODE */ \
157 mfspr r11,SPRN_SPRG3; /* if from user, start at top of */\
158 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
159 addi r11,r11,THREAD_SIZE; \
1601: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\
161 tophys(r11,r11); \
162 stw r10,_CCR(r11); /* save various registers */\
163 stw r12,GPR12(r11); \
164 stw r9,GPR9(r11); \
165 mflr r10; \
166 stw r10,_LINK(r11); \
167 mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
168 stw r12,_DEAR(r11); /* since they may have had stuff */\
169 mfspr r9,SPRN_ESR; /* in them at the point where the */\
170 stw r9,_ESR(r11); /* exception was taken */\
171 mfspr r12,SPRN_SRR2; \
172 stw r1,GPR1(r11); \
173 mfspr r9,SPRN_SRR3; \
174 stw r1,0(r11); \
175 tovirt(r1,r11); \
176 rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
177 stw r0,GPR0(r11); \
178 SAVE_4GPRS(3, r11); \
179 SAVE_2GPRS(7, r11)
180
181 /*
182 * State at this point:
183 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
184 * r10 saved in crit_r10 and in stack frame, trashed
185 * r11 saved in crit_r11 and in stack frame,
186 * now phys stack/exception frame pointer
187 * r12 saved in stack frame, now saved SRR2
188 * CR saved in stack frame, CR0.EQ = !SRR3.PR
189 * LR, DEAR, ESR in stack frame
190 * r1 saved in stack frame, now virt stack/excframe pointer
191 * r0, r3-r8 saved in stack frame
192 */
193
194/*
195 * Exception vectors.
196 */
197#define START_EXCEPTION(n, label) \
198 . = n; \
199label:
200
201#define EXCEPTION(n, label, hdlr, xfer) \
202 START_EXCEPTION(n, label); \
203 NORMAL_EXCEPTION_PROLOG; \
204 addi r3,r1,STACK_FRAME_OVERHEAD; \
205 xfer(n, hdlr)
206
207#define CRITICAL_EXCEPTION(n, label, hdlr) \
208 START_EXCEPTION(n, label); \
209 CRITICAL_EXCEPTION_PROLOG; \
210 addi r3,r1,STACK_FRAME_OVERHEAD; \
211 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
212 NOCOPY, crit_transfer_to_handler, \
213 ret_from_crit_exc)
214
215#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
216 li r10,trap; \
217 stw r10,_TRAP(r11); \
218 lis r10,msr@h; \
219 ori r10,r10,msr@l; \
220 copyee(r10, r9); \
221 bl tfer; \
222 .long hdlr; \
223 .long ret
224
225#define COPY_EE(d, s) rlwimi d,s,0,16,16
226#define NOCOPY(d, s)
227
228#define EXC_XFER_STD(n, hdlr) \
229 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
230 ret_from_except_full)
231
232#define EXC_XFER_LITE(n, hdlr) \
233 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
234 ret_from_except)
235
236#define EXC_XFER_EE(n, hdlr) \
237 EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
238 ret_from_except_full)
239
240#define EXC_XFER_EE_LITE(n, hdlr) \
241 EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
242 ret_from_except)
243
244
245/*
246 * 0x0100 - Critical Interrupt Exception
247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249
250/*
251 * 0x0200 - Machine Check Exception
252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254
255/*
256 * 0x0300 - Data Storage Exception
257 * This happens for just a few reasons. U0 set (but we don't do that),
258 * or zone protection fault (user violation, write to protected page).
259 * If this is just an update of modified status, we do that quickly
260 * and exit. Otherwise, we call heavyweight functions to do the work.
261 */
262 START_EXCEPTION(0x0300, DataStorage)
263 mtspr SPRN_SPRG0, r10 /* Save some working registers */
264 mtspr SPRN_SPRG1, r11
/* 403GCX has no SPRG4-7, so scratch state is spilled to fixed low
 * memory: with r0 as the base register, PowerPC load/store addressing
 * treats the base as literal 0, i.e. physical addresses 0x0-0xc. */
265#ifdef CONFIG_403GCX
266 stw r12, 0(r0)
267 stw r9, 4(r0)
268 mfcr r11
269 mfspr r12, SPRN_PID
270 stw r11, 8(r0)
271 stw r12, 12(r0)
272#else
273 mtspr SPRN_SPRG4, r12
274 mtspr SPRN_SPRG5, r9
275 mfcr r11
276 mfspr r12, SPRN_PID
277 mtspr SPRN_SPRG7, r11
278 mtspr SPRN_SPRG6, r12
279#endif
280
281 /* First, check if it was a zone fault (which means a user
282 * tried to access a kernel or read-protected page - always
283 * a SEGV). All other faults here must be stores, so no
284 * need to check ESR_DST as well. */
285 mfspr r10, SPRN_ESR
286 andis. r10, r10, ESR_DIZ@h
287 bne 2f
288
289 mfspr r10, SPRN_DEAR /* Get faulting address */
290
291 /* If we are faulting a kernel address, we have to use the
292 * kernel page tables.
293 */
294 lis r11, TASK_SIZE@h
295 cmplw r10, r11
296 blt+ 3f
297 lis r11, swapper_pg_dir@h
298 ori r11, r11, swapper_pg_dir@l
299 li r9, 0
300 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
301 b 4f
302
303 /* Get the PGD for the current thread.
304 */
3053:
306 mfspr r11,SPRN_SPRG3
307 lwz r11,PGDIR(r11)
3084:
309 tophys(r11, r11)
310 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
311 lwz r11, 0(r11) /* Get L1 entry */
312 rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
313 beq 2f /* Bail if no table */
314
315 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
316 lwz r11, 0(r12) /* Get Linux PTE */
317
318 andi. r9, r11, _PAGE_RW /* Is it writeable? */
319 beq 2f /* Bail if not */
320
321 /* Update 'changed'.
322 */
323 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
324 stw r11, 0(r12) /* Update Linux page table */
325
326 /* Most of the Linux PTE is ready to load into the TLB LO.
327 * We set ZSEL, where only the LS-bit determines user access.
328 * We set execute, because we don't have the granularity to
329 * properly set this at the page level (Linux problem).
330 * If shared is set, we cause a zero PID->TID load.
331 * Many of these bits are software only. Bits we don't set
332 * here we (properly should) assume have the appropriate value.
333 */
334 li r12, 0x0ce2
335 andc r11, r11, r12 /* Make sure 20, 21 are zero */
336
337 /* find the TLB index that caused the fault. It has to be here.
338 */
339 tlbsx r9, 0, r10
340
/* Only the data word of the existing entry is rewritten; the TLB tag
 * already matches since tlbsx found it. */
341 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
342
343 /* Done...restore registers and get out of here.
344 */
345#ifdef CONFIG_403GCX
346 lwz r12, 12(r0)
347 lwz r11, 8(r0)
348 mtspr SPRN_PID, r12
349 mtcr r11
350 lwz r9, 4(r0)
351 lwz r12, 0(r0)
352#else
353 mfspr r12, SPRN_SPRG6
354 mfspr r11, SPRN_SPRG7
355 mtspr SPRN_PID, r12
356 mtcr r11
357 mfspr r9, SPRN_SPRG5
358 mfspr r12, SPRN_SPRG4
359#endif
360 mfspr r11, SPRN_SPRG1
361 mfspr r10, SPRN_SPRG0
362 PPC405_ERR77_SYNC
363 rfi /* Should sync shadow TLBs */
364 b . /* prevent prefetch past rfi */
365
3662:
367 /* The bailout. Restore registers to pre-exception conditions
368 * and call the heavyweights to help us out.
369 */
370#ifdef CONFIG_403GCX
371 lwz r12, 12(r0)
372 lwz r11, 8(r0)
373 mtspr SPRN_PID, r12
374 mtcr r11
375 lwz r9, 4(r0)
376 lwz r12, 0(r0)
377#else
378 mfspr r12, SPRN_SPRG6
379 mfspr r11, SPRN_SPRG7
380 mtspr SPRN_PID, r12
381 mtcr r11
382 mfspr r9, SPRN_SPRG5
383 mfspr r12, SPRN_SPRG4
384#endif
385 mfspr r11, SPRN_SPRG1
386 mfspr r10, SPRN_SPRG0
387 b DataAccess
388
389/*
390 * 0x0400 - Instruction Storage Exception
391 * This is caused by a fetch from non-execute or guarded pages.
392 */
393 START_EXCEPTION(0x0400, InstructionAccess)
394 NORMAL_EXCEPTION_PROLOG
395 mr r4,r12 /* Pass SRR0 as arg2 */
396 li r5,0 /* Pass zero as arg3 */
397 EXC_XFER_EE_LITE(0x400, handle_page_fault)
398
399/* 0x0500 - External Interrupt Exception */
400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
401
402/* 0x0600 - Alignment Exception */
403 START_EXCEPTION(0x0600, Alignment)
404 NORMAL_EXCEPTION_PROLOG
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, alignment_exception)
409
410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck)
412 NORMAL_EXCEPTION_PROLOG
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, program_check_exception)
417
/* 0x0800-0x0B00 - unused on 4xx; route to unknown_exception */
418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422
423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427
/* 0x0D00-0x0F00 - likewise unused on 4xx */
428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer)
434 NORMAL_EXCEPTION_PROLOG
/* Acknowledge the PIT interrupt by writing TSR_PIS back to the
 * Timer Status Register (write-one-to-clear) before transferring
 * to the generic timer handler. */
435 lis r0,TSR_PIS@h
436 mtspr SPRN_TSR,r0 /* Clear the PIT exception */
437 addi r3,r1,STACK_FRAME_OVERHEAD
438 EXC_XFER_LITE(0x1000, timer_interrupt)
439
/* Dead code, deliberately compiled out (#if 0): placeholder vectors
 * for the Fixed Interval Timer and Watchdog, kept as documentation
 * of the intended layout. */
440#if 0
441/* NOTE:
442 * FIT and WDT handlers are not implemented yet.
443 */
444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/
447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448
449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/
451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif
456#endif
457
458/* 0x1100 - Data TLB Miss Exception
459 * As the name implies, translation is not in the MMU, so search the
460 * page tables and fix it. The only purpose of this function is to
461 * load TLB entries from the page table if they exist.
462 */
463 START_EXCEPTION(0x1100, DTLBMiss)
464 mtspr SPRN_SPRG0, r10 /* Save some working registers */
465 mtspr SPRN_SPRG1, r11
/* 403GCX scratch spill to physical 0x0-0xc (r0 base reads as 0);
 * other 4xx parts use SPRG4-7. Mirrors the DataStorage handler. */
466#ifdef CONFIG_403GCX
467 stw r12, 0(r0)
468 stw r9, 4(r0)
469 mfcr r11
470 mfspr r12, SPRN_PID
471 stw r11, 8(r0)
472 stw r12, 12(r0)
473#else
474 mtspr SPRN_SPRG4, r12
475 mtspr SPRN_SPRG5, r9
476 mfcr r11
477 mfspr r12, SPRN_PID
478 mtspr SPRN_SPRG7, r11
479 mtspr SPRN_SPRG6, r12
480#endif
481 mfspr r10, SPRN_DEAR /* Get faulting address */
482
483 /* If we are faulting a kernel address, we have to use the
484 * kernel page tables.
485 */
486 lis r11, TASK_SIZE@h
487 cmplw r10, r11
488 blt+ 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 li r9, 0
492 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
493 b 4f
494
495 /* Get the PGD for the current thread.
496 */
4973:
498 mfspr r11,SPRN_SPRG3
499 lwz r11,PGDIR(r11)
5004:
501 tophys(r11, r11)
502 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
503 lwz r12, 0(r11) /* Get L1 entry */
504 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
505 beq 2f /* Bail if no table */
506
507 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
508 lwz r11, 0(r12) /* Get Linux PTE */
509 andi. r9, r11, _PAGE_PRESENT
510 beq 5f
511
/* Mark the PTE referenced before loading it, so reclaim sees it. */
512 ori r11, r11, _PAGE_ACCESSED
513 stw r11, 0(r12)
514
515 /* Create TLB tag. This is the faulting address plus a static
516 * set of bits. These are size, valid, E, U0.
517 */
518 li r12, 0x00c0
519 rlwimi r10, r12, 0, 20, 31
520
521 b finish_tlb_load
522
5232: /* Check for possible large-page pmd entry */
/* Extract the size field from the PMD; nonzero presumably means this
 * PMD maps a large page directly — confirm against the 4xx pgtable
 * definitions. */
524 rlwinm. r9, r12, 2, 22, 24
525 beq 5f
526
527 /* Create TLB tag. This is the faulting address, plus a static
528 * set of bits (valid, E, U0) plus the size from the PMD.
529 */
530 ori r9, r9, 0x40
531 rlwimi r10, r9, 0, 20, 31
532 mr r11, r12
533
534 b finish_tlb_load
535
5365:
537 /* The bailout. Restore registers to pre-exception conditions
538 * and call the heavyweights to help us out.
539 */
540#ifdef CONFIG_403GCX
541 lwz r12, 12(r0)
542 lwz r11, 8(r0)
543 mtspr SPRN_PID, r12
544 mtcr r11
545 lwz r9, 4(r0)
546 lwz r12, 0(r0)
547#else
548 mfspr r12, SPRN_SPRG6
549 mfspr r11, SPRN_SPRG7
550 mtspr SPRN_PID, r12
551 mtcr r11
552 mfspr r9, SPRN_SPRG5
553 mfspr r12, SPRN_SPRG4
554#endif
555 mfspr r11, SPRN_SPRG1
556 mfspr r10, SPRN_SPRG0
557 b DataAccess
558
559/* 0x1200 - Instruction TLB Miss Exception
560 * Nearly the same as above, except we get our information from different
561 * registers and bailout to a different point.
562 */
563 START_EXCEPTION(0x1200, ITLBMiss)
564 mtspr SPRN_SPRG0, r10 /* Save some working registers */
565 mtspr SPRN_SPRG1, r11
/* Same scratch-spill scheme as DTLBMiss: low memory on 403GCX,
 * SPRG4-7 elsewhere. */
566#ifdef CONFIG_403GCX
567 stw r12, 0(r0)
568 stw r9, 4(r0)
569 mfcr r11
570 mfspr r12, SPRN_PID
571 stw r11, 8(r0)
572 stw r12, 12(r0)
573#else
574 mtspr SPRN_SPRG4, r12
575 mtspr SPRN_SPRG5, r9
576 mfcr r11
577 mfspr r12, SPRN_PID
578 mtspr SPRN_SPRG7, r11
579 mtspr SPRN_SPRG6, r12
580#endif
/* Instruction-side miss: the faulting address is the fetch PC (SRR0),
 * not DEAR as in the data-side handler. */
581 mfspr r10, SPRN_SRR0 /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 cmplw r10, r11
588 blt+ 3f
589 lis r11, swapper_pg_dir@h
590 ori r11, r11, swapper_pg_dir@l
591 li r9, 0
592 mtspr SPRN_PID, r9 /* TLB will have 0 TID */
593 b 4f
594
595 /* Get the PGD for the current thread.
596 */
5973:
598 mfspr r11,SPRN_SPRG3
599 lwz r11,PGDIR(r11)
6004:
601 tophys(r11, r11)
602 rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
603 lwz r12, 0(r11) /* Get L1 entry */
604 andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
605 beq 2f /* Bail if no table */
606
607 rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
608 lwz r11, 0(r12) /* Get Linux PTE */
609 andi. r9, r11, _PAGE_PRESENT
610 beq 5f
611
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, 0(r12)
614
615 /* Create TLB tag. This is the faulting address plus a static
616 * set of bits. These are size, valid, E, U0.
617 */
618 li r12, 0x00c0
619 rlwimi r10, r12, 0, 20, 31
620
621 b finish_tlb_load
622
6232: /* Check for possible large-page pmd entry */
624 rlwinm. r9, r12, 2, 22, 24
625 beq 5f
626
627 /* Create TLB tag. This is the faulting address, plus a static
628 * set of bits (valid, E, U0) plus the size from the PMD.
629 */
630 ori r9, r9, 0x40
631 rlwimi r10, r9, 0, 20, 31
632 mr r11, r12
633
634 b finish_tlb_load
635
6365:
637 /* The bailout. Restore registers to pre-exception conditions
638 * and call the heavyweights to help us out.
639 */
640#ifdef CONFIG_403GCX
641 lwz r12, 12(r0)
642 lwz r11, 8(r0)
643 mtspr SPRN_PID, r12
644 mtcr r11
645 lwz r9, 4(r0)
646 lwz r12, 0(r0)
647#else
648 mfspr r12, SPRN_SPRG6
649 mfspr r11, SPRN_SPRG7
650 mtspr SPRN_PID, r12
651 mtcr r11
652 mfspr r9, SPRN_SPRG5
653 mfspr r12, SPRN_SPRG4
654#endif
655 mfspr r11, SPRN_SPRG1
656 mfspr r10, SPRN_SPRG0
/* Unresolvable instruction-side fault: bail to the 0x0400 handler. */
657 b InstructionAccess
658
/* Vectors 0x1300-0x1F00 are unused on 4xx and funneled to
 * unknown_exception. */
659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
/* 405GP errata 51 workaround: redirect the 0x1700 vector into the
 * data TLB miss handler instead of treating it as unknown. */
663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss
667#else
668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif
670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678
679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case
681 * where an instruction that we are trying to single step causes
682 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
683 * the exception handler generates a single step debug exception.
684 *
685 * If we get a debug trap on the first instruction of an exception handler,
686 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
687 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
688 * The exception handler was handling a non-critical interrupt, so it will
689 * save (and later restore) the MSR via SPRN_SRR1, which will still have
690 * the MSR_DE bit set.
691 */
692 /* 0x2000 - Debug Exception */
693 START_EXCEPTION(0x2000, DebugTrap)
694 CRITICAL_EXCEPTION_PROLOG
695
696 /*
697 * If this is a single step or branch-taken exception in an
698 * exception entry sequence, it was probably meant to apply to
699 * the code where the exception occurred (since exception entry
700 * doesn't turn off DE automatically). We simulate the effect
701 * of turning off DE on entry to an exception handler by turning
702 * off DE in the SRR3 value and clearing the debug status.
703 */
704 mfspr r10,SPRN_DBSR /* check single-step/branch taken */
705 andis. r10,r10,DBSR_IC@h
706 beq+ 2f
707
708 andi. r10,r9,MSR_IR|MSR_PR /* check supervisor + MMU off */
709 beq 1f /* branch and fix it up */
710
710/* Was the faulting PC inside the exception-vector region (< 0x2100)?
 * If so, the step hit an exception prolog and must be suppressed. */
711 mfspr r10,SPRN_SRR2 /* Faulting instruction address */
712 cmplwi r10,0x2100
713 bgt+ 2f /* address above exception vectors */
714
715 /* here it looks like we got an inappropriate debug exception. */
7161: rlwinm r9,r9,0,~MSR_DE /* clear DE in the SRR3 value */
717 lis r10,DBSR_IC@h /* clear the IC event */
718 mtspr SPRN_DBSR,r10
719 /* restore state and get out */
720 lwz r10,_CCR(r11)
721 lwz r0,GPR0(r11)
722 lwz r1,GPR1(r11)
723 mtcrf 0x80,r10
724 mtspr SPRN_SRR2,r12
725 mtspr SPRN_SRR3,r9
726 lwz r9,GPR9(r11)
727 lwz r12,GPR12(r11)
/* crit_r10/crit_r11 are presumably fixed low-memory save slots
 * written by CRITICAL_EXCEPTION_PROLOG — confirm in the prolog macro
 * (outside this view). */
728 lwz r10,crit_r10@l(0)
729 lwz r11,crit_r11@l(0)
730 PPC405_ERR77_SYNC
731 rfci
732 b .
733
734 /* continue normal handling for a critical exception... */
7352: mfspr r4,SPRN_DBSR
736 addi r3,r1,STACK_FRAME_OVERHEAD
737 EXC_XFER_TEMPLATE(DebugException, 0x2002, \
738 (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
739 NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
740
741/*
742 * The other Data TLB exceptions bail out to this point
743 * if they can't resolve the lightweight TLB fault.
744 */
/* Heavyweight data-fault path: builds a normal exception frame and
 * hands ESR (arg3, r5) and DEAR (arg2, r4) to handle_page_fault,
 * reported under trap number 0x300. */
745DataAccess:
746 NORMAL_EXCEPTION_PROLOG
747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
748 stw r5,_ESR(r11)
749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
750 EXC_XFER_EE_LITE(0x300, handle_page_fault)
751
752/* Other PowerPC processors, namely those derived from the 6xx-series
753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
754 * However, for the 4xx-series processors these are neither defined nor
755 * reserved.
756 */
757
758 /* Damn, I came up one instruction too many to fit into the
759 * exception space :-). Both the instruction and data TLB
760 * miss get to this point to load the TLB.
761 * r10 - TLB_TAG value
762 * r11 - Linux PTE
763 * r12, r9 - available to use
764 * PID - loaded with proper value when we get here
765 * Upon exit, we reload everything and RFI.
766 * Actually, it will fit now, but oh well.....a common place
767 * to load the TLB.
768 */
/* Round-robin TLB replacement cursor, wrapped to PPC4XX_TLB_SIZE. */
769tlb_4xx_index:
770 .long 0
771finish_tlb_load:
772 /* load the next available TLB index.
773 */
774 lwz r9, tlb_4xx_index@l(0)
775 addi r9, r9, 1
776 andi. r9, r9, (PPC4XX_TLB_SIZE-1)
777 stw r9, tlb_4xx_index@l(0)
778
7796:
780 /*
781 * Clear out the software-only bits in the PTE to generate the
782 * TLB_DATA value. These are the bottom 2 bits of the RPM, the
783 * top 3 bits of the zone field, and M.
784 */
785 li r12, 0x0ce2
786 andc r11, r11, r12
787
788 tlbwe r11, r9, TLB_DATA /* Load TLB LO */
789 tlbwe r10, r9, TLB_TAG /* Load TLB HI */
790
791 /* Done...restore registers and get out of here.
792 */
/* Restore mirrors the save in the TLB-miss prologs: low memory on
 * 403GCX, SPRG4-7 elsewhere, then SPRG0/1 for r10/r11. */
793#ifdef CONFIG_403GCX
794 lwz r12, 12(r0)
795 lwz r11, 8(r0)
796 mtspr SPRN_PID, r12
797 mtcr r11
798 lwz r9, 4(r0)
799 lwz r12, 0(r0)
800#else
801 mfspr r12, SPRN_SPRG6
802 mfspr r11, SPRN_SPRG7
803 mtspr SPRN_PID, r12
804 mtcr r11
805 mfspr r9, SPRN_SPRG5
806 mfspr r12, SPRN_SPRG4
807#endif
808 mfspr r11, SPRN_SPRG1
809 mfspr r10, SPRN_SPRG0
810 PPC405_ERR77_SYNC
811 rfi /* Should sync shadow TLBs */
812 b . /* prevent prefetch past rfi */
813
814/* extern void giveup_fpu(struct task_struct *prev)
815 *
816 * The PowerPC 4xx family of processors do not have an FPU, so this just
817 * returns.
818 */
/* Stub kept so generic code can call giveup_fpu unconditionally. */
819_GLOBAL(giveup_fpu)
820 blr
821
822/* This is where the main kernel code starts.
823 */
/* Entry point reached with the MMU on via the boot-time 16 MB pinned
 * mapping set up by initial_mmu. Sets up current/thread/stack, runs
 * early_init and MMU_init, then re-enters with the real mappings. */
824start_here:
825
826 /* ptr to current */
827 lis r2,init_task@h
828 ori r2,r2,init_task@l
829
830 /* ptr to phys current thread */
831 tophys(r4,r2)
832 addi r4,r4,THREAD /* init task's THREAD */
833 mtspr SPRN_SPRG3,r4
834
835 /* stack */
836 lis r1,init_thread_union@ha
837 addi r1,r1,init_thread_union@l
838 li r0,0
839 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
840
841 bl early_init /* We have to do this with MMU on */
842
843/*
844 * Decide what sort of machine this is and initialize the MMU.
845 */
/* r31-r27 presumably hold boot parameters saved at entry (outside
 * this view) — passed through as args to machine_init. */
846 mr r3,r31
847 mr r4,r30
848 mr r5,r29
849 mr r6,r28
850 mr r7,r27
851 bl machine_init
852 bl MMU_init
853
854/* Go back to running unmapped so we can load up new values
855 * and change to using our exception vectors.
856 * On the 4xx, all we have to do is invalidate the TLB to clear
857 * the old 16M byte TLB mappings.
858 */
/* rfi to the physical address of local label 2 with IR/DR cleared,
 * i.e. a controlled jump to real mode. */
859 lis r4,2f@h
860 ori r4,r4,2f@l
861 tophys(r4,r4)
862 lis r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
863 ori r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
864 mtspr SPRN_SRR0,r4
865 mtspr SPRN_SRR1,r3
866 rfi
867 b . /* prevent prefetch past rfi */
868
869/* Load up the kernel context */
8702:
871 sync /* Flush to memory before changing TLB */
872 tlbia
873 isync /* Flush shadow TLBs */
874
875 /* set up the PTE pointers for the Abatron bdiGDB.
876 */
877 lis r6, swapper_pg_dir@h
878 ori r6, r6, swapper_pg_dir@l
879 lis r5, abatron_pteptrs@h
880 ori r5, r5, abatron_pteptrs@l
881 stw r5, 0xf0(r0) /* Must match your Abatron config file */
882 tophys(r5,r5)
883 stw r6, 0(r5)
884
885/* Now turn on the MMU for real! */
886 lis r4,MSR_KERNEL@h
887 ori r4,r4,MSR_KERNEL@l
888 lis r3,start_kernel@h
889 ori r3,r3,start_kernel@l
890 mtspr SPRN_SRR0,r3
891 mtspr SPRN_SRR1,r4
892 rfi /* enable MMU and jump to start_kernel */
893 b . /* prevent prefetch past rfi */
894
895/* Set up the initial MMU state so we can do the first level of
896 * kernel initialization. This maps the first 16 MBytes of memory 1:1
897 * virtual to physical and more importantly sets the cache mode.
898 */
initial_mmu:
900 tlbia /* Invalidate all TLB entries */
901 isync
902
903 /* We should still be executing code at physical address 0x0000xxxx
904 * at this point. However, start_here is at virtual address
905 * 0xC000xxxx. So, set up a TLB mapping to cover this once
906 * translation is enabled.
907 */
908
909 lis r3,KERNELBASE@h /* Load the kernel virtual address */
910 ori r3,r3,KERNELBASE@l
911 tophys(r4,r3) /* Load the kernel physical address */
912
913 iccci r0,r3 /* Invalidate the i-cache before use */
914
915 /* Load the kernel PID.
916 */
917 li r0,0
918 mtspr SPRN_PID,r0
919 sync
920
921 /* Configure and load two entries into TLB slots 62 and 63.
922 * In case we are pinning TLBs, these are reserved in by the
923 * other TLB functions. If not reserving, then it doesn't
924 * matter where they are loaded.
925 */
/* NOTE(review): the comment above says two entries/slots 62 and 63,
 * but only slot 63 is written below — presumably stale comment. */
926 clrrwi r4,r4,10 /* Mask off the real page number */
927 ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
928
929 clrrwi r3,r3,10 /* Mask off the effective page number */
930 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))
931
932 li r0,63 /* TLB slot 63 */
933
934 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
935 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
936
937#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
938
939 /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
940 * the UARTs nice and early. We use a 4k real==virtual mapping. */
941
942 lis r3,SERIAL_DEBUG_IO_BASE@h
943 ori r3,r3,SERIAL_DEBUG_IO_BASE@l
944 mr r4,r3
945 clrrwi r4,r4,12
946 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
947
948 clrrwi r3,r3,12
949 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
950
951 li r0,0 /* TLB slot 0 */
952 tlbwe r4,r0,TLB_DATA
953 tlbwe r3,r0,TLB_TAG
954#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */
955
956 isync
957
958 /* Establish the exception vector base
959 */
960 lis r4,KERNELBASE@h /* EVPR only uses the high 16-bits */
961 tophys(r0,r4) /* Use the physical address */
962 mtspr SPRN_EVPR,r0
963
964 blr
965
/* abort(): force a board-level reboot by setting the system-reset
 * request bit in DBCR0. NOTE(review): there is no blr here — execution
 * falls through into set_context if the reset is not immediate;
 * presumably the reset takes effect before that matters — confirm. */
_GLOBAL(abort)
967 mfspr r13,SPRN_DBCR0
968 oris r13,r13,DBCR0_RST_SYSTEM@h
969 mtspr SPRN_DBCR0,r13
970
/* set_context(r3 = new PID/context, r4 = new pgdir): switch address
 * space by writing the PID register; isync flushes the shadow TLBs. */
_GLOBAL(set_context)
972
973#ifdef CONFIG_BDI_SWITCH
974 /* Context switch the PTE pointer for the Abatron BDI2000.
975 * The PGDIR is the second parameter.
976 */
977 lis r5, KERNELBASE@h
978 lwz r5, 0xf0(r5)
979 stw r4, 0x4(r5)
980#endif
981 sync
982 mtspr SPRN_PID,r3
983 isync /* Need an isync to flush shadow */
984 /* TLBs after changing PID */
985 blr
986
987/* We put a few things here that have to be page-aligned. This stuff
988 * goes at the beginning of the data segment, which is page-aligned.
989 */
990 .data
991 .align 12
992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
996 .space 4096
997 .globl swapper_pg_dir
998swapper_pg_dir:
999 .space 4096
1000
1001
1002/* Stack for handling critical exceptions from kernel mode */
1003 .section .bss
1004 .align 12
exception_stack_bottom:
1006 .space 4096
/* Note: critical_stack_top and exception_stack_top label the same
 * address (the top of the single 4 KB region above). */
critical_stack_top:
1008 .globl exception_stack_top
exception_stack_top:
1010
1011/* This space gets a copy of optional info passed to us by the bootstrap
1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1013 */
1014 .globl cmd_line
cmd_line:
1016 .space 512
1017
1018/* Room for two PTE pointers, usually the kernel and current user pointers
1019 * to their respective root page table.
1020 */
abatron_pteptrs:
1022 .space 8
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
new file mode 100644
index 000000000000..147215a0d6c0
--- /dev/null
+++ b/arch/powerpc/kernel/head_64.S
@@ -0,0 +1,1957 @@
1/*
2 * arch/ppc64/kernel/head.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 *
14 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
15 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
16 *
17 * This file contains the low-level support and setup for the
18 * PowerPC-64 platform, including trap and interrupt dispatch.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/config.h>
27#include <linux/threads.h>
28#include <asm/reg.h>
29#include <asm/page.h>
30#include <asm/mmu.h>
31#include <asm/systemcfg.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34#include <asm/bug.h>
35#include <asm/cputable.h>
36#include <asm/setup.h>
37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
40
41#ifdef CONFIG_PPC_ISERIES
42#define DO_SOFT_DISABLE
43#endif
44
45/*
46 * We layout physical memory as follows:
47 * 0x0000 - 0x00ff : Secondary processor spin code
48 * 0x0100 - 0x2fff : pSeries Interrupt prologs
49 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
50 * 0x6000 - 0x6fff : Initial (CPU0) segment table
51 * 0x7000 - 0x7fff : FWNMI data area
52 * 0x8000 - : Early init and support code
53 */
54
55/*
56 * SPRG Usage
57 *
58 * Register Definition
59 *
60 * SPRG0 reserved for hypervisor
61 * SPRG1 temp - used to save gpr
62 * SPRG2 temp - used to save gpr
63 * SPRG3 virt addr of paca
64 */
65
66/*
67 * Entering into this code we make the following assumptions:
68 * For pSeries:
69 * 1. The MMU is off & open firmware is running in real mode.
70 * 2. The kernel is entered at __start
71 *
72 * For iSeries:
73 * 1. The MMU is on (as it always is for iSeries)
74 * 2. The kernel is entered at system_reset_iSeries
75 */
76
/* Kernel image start. On pSeries/multiplatform, a feature-section
 * branch (NOP-able) jumps to the multiplatform init path; a branch to
 * address 0 in real mode lands on the trap below. */
 .text
78 .globl _stext
_stext:
80#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
82 /* NOP this out unconditionally */
BEGIN_FTR_SECTION
84 b .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
86#endif /* CONFIG_PPC_MULTIPLATFORM */
87
88 /* Catch branch to 0 in real mode */
89 trap
90
91#ifdef CONFIG_PPC_ISERIES
92 /*
93 * At offset 0x20, there is a pointer to iSeries LPAR data.
94 * This is required by the hypervisor
95 */
96 . = 0x20
97 .llong hvReleaseData-KERNELBASE
98
99 /*
100 * At offset 0x28 and 0x30 are offsets to the mschunks_map
101 * array (used by the iSeries LPAR debugger to do translation
102 * between physical addresses and absolute addresses) and
103 * to the pidhash table (also used by the debugger)
104 */
105 .llong mschunks_map-KERNELBASE
106 .llong 0 /* pidhash-KERNELBASE SFRXXX */
107
108 /* Offset 0x38 - Pointer to start of embedded System.map */
109 .globl embedded_sysmap_start
embedded_sysmap_start:
111 .llong 0
112 /* Offset 0x40 - Pointer to end of embedded System.map */
113 .globl embedded_sysmap_end
embedded_sysmap_end:
115 .llong 0
116
117#endif /* CONFIG_PPC_ISERIES */
118
119 /* Secondary processors spin on this value until it goes to 1. */
120 .globl __secondary_hold_spinloop
__secondary_hold_spinloop:
122 .llong 0x0
123
124 /* Secondary processors write this value with their cpu # */
125 /* after they enter the spin loop immediately below. */
126 .globl __secondary_hold_acknowledge
__secondary_hold_acknowledge:
128 .llong 0x0
129
130 . = 0x60
131/*
132 * The following code is used on pSeries to hold secondary processors
133 * in a spin loop after they have been freed from OpenFirmware, but
134 * before the bulk of the kernel has been relocated. This code
135 * is relocated to physical address 0x60 before prom_init is run.
136 * All of it must fit below the first exception vector at 0x100.
137 */
_GLOBAL(__secondary_hold)
139 mfmsr r24
140 ori r24,r24,MSR_RI
141 mtmsrd r24 /* RI on */
142
143 /* Grab our linux cpu number */
144 mr r24,r3
145
146 /* Tell the master cpu we're here */
147 /* Relocation is off & we are located at an address less */
148 /* than 0x100, so only need to grab low order offset. */
149 std r24,__secondary_hold_acknowledge@l(0)
150 sync
151
152 /* All secondary cpus wait here until told to start. */
/* Busy-wait on the spinloop word; the boot cpu writes 1 to release. */
100: ld r4,__secondary_hold_spinloop@l(0)
154 cmpdi 0,r4,1
155 bne 100b
156
157#ifdef CONFIG_HMT
158 b .hmt_init
159#else
160#ifdef CONFIG_SMP
161 mr r3,r24
162 b .pSeries_secondary_smp_init
163#else
164 BUG_OPCODE
165#endif
166#endif
167
168/* This value is used to mark exception frames on the stack. */
/* 0x7265677368657265 == ASCII "regshere". */
169 .section ".toc","aw"
exception_marker:
171 .tc ID_72656773_68657265[TC],0x7265677368657265
172 .text
173
174/*
175 * The following macros define the code that appears as
176 * the prologue to each of the exception handlers. They
177 * are split into two parts to allow a single kernel binary
178 * to be used for pSeries and iSeries.
179 * LOL. One day... - paulus
180 */
181
182/*
183 * We make as much of the exception code common between native
184 * exception handlers (including pSeries LPAR) and iSeries LPAR
185 * implementations as possible.
186 */
187
188/*
189 * This is the start of the interrupt handlers for pSeries
190 * This code runs with relocation off.
191 */
/* Byte offsets of the per-exception save slots inside a paca save
 * area (PACA_EXGEN/PACA_EXMC/PACA_EXSLB). Note the deliberate
 * overlaps: SRR0/R3 share offset 40 and DAR/LR share offset 48,
 * because the SLB-miss path saves R3/LR instead of SRR0/DAR. */
192#define EX_R9 0
193#define EX_R10 8
194#define EX_R11 16
195#define EX_R12 24
196#define EX_R13 32
197#define EX_SRR0 40
198#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
199#define EX_DAR 48
200#define EX_LR 48 /* SLB miss saves LR, but not DAR */
201#define EX_DSISR 56
202#define EX_CCR 60
203
/* Real-mode prolog: stash r9-r13 and CR in the given paca save area,
 * then rfid to the virtual-mode handler `label` with IR/DR/RI turned
 * on. On entry r13 was saved in SPRG1 by the vector stub. The high
 * half of the handler address is taken from r13's high 32 bits
 * (the paca is in the kernel linear mapping). */
204#define EXCEPTION_PROLOG_PSERIES(area, label) \
205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
207 std r10,area+EX_R10(r13); \
208 std r11,area+EX_R11(r13); \
209 std r12,area+EX_R12(r13); \
210 mfspr r9,SPRN_SPRG1; \
211 std r9,area+EX_R13(r13); \
212 mfcr r9; \
213 clrrdi r12,r13,32; /* get high part of &label */ \
214 mfmsr r10; \
215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
216 ori r12,r12,(label)@l; /* virt addr of handler */ \
217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
218 mtspr SPRN_SRR0,r12; \
219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
220 mtspr SPRN_SRR1,r10; \
221 rfid; \
222 b . /* prevent speculative execution */
223
224/*
225 * This is the start of the interrupt handlers for iSeries
226 * This code runs with relocation on.
227 */
/* iSeries part 1: same register stashing as the pSeries prolog, but
 * no rfid — relocation is already on under the hypervisor. */
228#define EXCEPTION_PROLOG_ISERIES_1(area) \
229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
231 std r10,area+EX_R10(r13); \
232 std r11,area+EX_R11(r13); \
233 std r12,area+EX_R12(r13); \
234 mfspr r9,SPRN_SPRG1; \
235 std r9,area+EX_R13(r13); \
236 mfcr r9
237
/* iSeries part 2: fetch saved SRR0/SRR1 from the hypervisor lppaca
 * and turn MSR_RI back on. */
238#define EXCEPTION_PROLOG_ISERIES_2 \
239 mfmsr r10; \
240 ld r11,PACALPPACA+LPPACASRR0(r13); \
241 ld r12,PACALPPACA+LPPACASRR1(r13); \
242 ori r10,r10,MSR_RI; \
243 mtmsrd r10,1
244
245/*
246 * The common exception prolog is used for all except a few exceptions
247 * such as a segment miss on a kernel address. We have to be prepared
248 * to take another exception from the point where we first touch the
249 * kernel stack onwards.
250 *
251 * On entry r13 points to the paca, r9-r13 are saved in the paca,
252 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
253 * SRR1, and relocation is on.
254 */
255#define EXCEPTION_PROLOG_COMMON(n, area) \
256 andi. r10,r12,MSR_PR; /* See if coming from user */ \
257 mr r10,r1; /* Save r1 */ \
258 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
259 beq- 1f; \
260 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
2611: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
262 bge- cr1,bad_stack; /* abort if it is */ \
263 std r9,_CCR(r1); /* save CR in stackframe */ \
264 std r11,_NIP(r1); /* save SRR0 in stackframe */ \
265 std r12,_MSR(r1); /* save SRR1 in stackframe */ \
266 std r10,0(r1); /* make stack chain pointer */ \
267 std r0,GPR0(r1); /* save r0 in stackframe */ \
268 std r10,GPR1(r1); /* save r1 in stackframe */ \
269 std r2,GPR2(r1); /* save r2 in stackframe */ \
270 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
271 SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
272 ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \
273 ld r10,area+EX_R10(r13); \
274 std r9,GPR9(r1); \
275 std r10,GPR10(r1); \
276 ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \
277 ld r10,area+EX_R12(r13); \
278 ld r11,area+EX_R13(r13); \
279 std r9,GPR11(r1); \
280 std r10,GPR12(r1); \
281 std r11,GPR13(r1); \
282 ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
283 mflr r9; /* save LR in stackframe */ \
284 std r9,_LINK(r1); \
285 mfctr r10; /* save CTR in stackframe */ \
286 std r10,_CTR(r1); \
287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
288 std r11,_XER(r1); \
/* "(n)+1": low bit of the stored trap number is set — presumably the
 * full-register-save marker checked by FULL_REGS users; confirm in
 * asm-offsets/ptrace definitions. */
289 li r9,(n)+1; \
290 std r9,_TRAP(r1); /* set trap number */ \
291 li r10,0; \
292 ld r11,exception_marker@toc(r2); \
293 std r10,RESULT(r1); /* clear regs->result */ \
294 std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */
295
296/*
297 * Exception vectors.
298 */
/* Emit a pSeries vector stub at fixed offset n: save r13, then run
 * the real-mode prolog which rfids to label##_common. */
299#define STD_EXCEPTION_PSERIES(n, label) \
300 . = n; \
301 .globl label##_pSeries; \
label##_pSeries: \
303 HMT_MEDIUM; \
304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
305 RUNLATCH_ON(r13); \
306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
307
/* iSeries equivalent: relocation already on, so just branch. */
308#define STD_EXCEPTION_ISERIES(n, label, area) \
309 .globl label##_iSeries; \
label##_iSeries: \
311 HMT_MEDIUM; \
312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
313 RUNLATCH_ON(r13); \
314 EXCEPTION_PROLOG_ISERIES_1(area); \
315 EXCEPTION_PROLOG_ISERIES_2; \
316 b label##_common
317
/* iSeries maskable variant: honours the soft-disable flag in the
 * paca (PACAPROCENABLED) and diverts to the _masked stub when
 * interrupts are soft-disabled. */
318#define MASKABLE_EXCEPTION_ISERIES(n, label) \
319 .globl label##_iSeries; \
label##_iSeries: \
321 HMT_MEDIUM; \
322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
323 RUNLATCH_ON(r13); \
324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
325 lbz r10,PACAPROCENABLED(r13); \
326 cmpwi 0,r10,0; \
327 beq- label##_iSeries_masked; \
328 EXCEPTION_PROLOG_ISERIES_2; \
329 b label##_common; \
330
/* Soft-disable (iSeries) vs. hard enable/disable of MSR_EE. The
 * soft path records the previous enable state in SOFTE(r1). */
331#ifdef DO_SOFT_DISABLE
332#define DISABLE_INTS \
333 lbz r10,PACAPROCENABLED(r13); \
334 li r11,0; \
335 std r10,SOFTE(r1); \
336 mfmsr r10; \
337 stb r11,PACAPROCENABLED(r13); \
338 ori r10,r10,MSR_EE; \
339 mtmsrd r10,1
340
341#define ENABLE_INTS \
342 lbz r10,PACAPROCENABLED(r13); \
343 mfmsr r11; \
344 std r10,SOFTE(r1); \
345 ori r11,r11,MSR_EE; \
346 mtmsrd r11,1
347
348#else /* hard enable/disable interrupts */
349#define DISABLE_INTS
350
/* Restore EE to whatever the interrupted context had (from _MSR). */
351#define ENABLE_INTS \
352 ld r12,_MSR(r1); \
353 mfmsr r11; \
354 rlwimi r11,r12,0,MSR_EE; \
355 mtmsrd r11,1
356
357#endif
358
/* Common handler bodies: full prolog, then call the C handler.
 * The _LITE form skips saving the non-volatile GPRs and returns via
 * the lightweight exit path. */
359#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
360 .align 7; \
361 .globl label##_common; \
label##_common: \
363 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
364 DISABLE_INTS; \
365 bl .save_nvgprs; \
366 addi r3,r1,STACK_FRAME_OVERHEAD; \
367 bl hdlr; \
368 b .ret_from_except
369
370#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
371 .align 7; \
372 .globl label##_common; \
label##_common: \
374 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
375 DISABLE_INTS; \
376 addi r3,r1,STACK_FRAME_OVERHEAD; \
377 bl hdlr; \
378 b .ret_from_except_lite
379
380/*
381 * Start of pSeries system interrupt routines
382 */
383 . = 0x100
384 .globl __start_interrupts
385__start_interrupts:
386
387 STD_EXCEPTION_PSERIES(0x100, system_reset)
388
389 . = 0x200
390_machine_check_pSeries:
391 HMT_MEDIUM
392 mtspr SPRN_SPRG1,r13 /* save r13 */
393 RUNLATCH_ON(r13)
394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
395
396 . = 0x300
397 .globl data_access_pSeries
398data_access_pSeries:
399 HMT_MEDIUM
400 mtspr SPRN_SPRG1,r13
401BEGIN_FTR_SECTION
402 mtspr SPRN_SPRG2,r12
403 mfspr r13,SPRN_DAR
404 mfspr r12,SPRN_DSISR
405 srdi r13,r13,60
406 rlwimi r13,r12,16,0x20
407 mfcr r12
408 cmpwi r13,0x2c
409 beq .do_stab_bolted_pSeries
410 mtcrf 0x80,r12
411 mfspr r12,SPRN_SPRG2
412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
414
415 . = 0x380
416 .globl data_access_slb_pSeries
417data_access_slb_pSeries:
418 HMT_MEDIUM
419 mtspr SPRN_SPRG1,r13
420 RUNLATCH_ON(r13)
421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
423 std r10,PACA_EXSLB+EX_R10(r13)
424 std r11,PACA_EXSLB+EX_R11(r13)
425 std r12,PACA_EXSLB+EX_R12(r13)
426 std r3,PACA_EXSLB+EX_R3(r13)
427 mfspr r9,SPRN_SPRG1
428 std r9,PACA_EXSLB+EX_R13(r13)
429 mfcr r9
430 mfspr r12,SPRN_SRR1 /* and SRR1 */
431 mfspr r3,SPRN_DAR
432 b .do_slb_miss /* Rel. branch works in real mode */
433
434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
435
436 . = 0x480
437 .globl instruction_access_slb_pSeries
438instruction_access_slb_pSeries:
439 HMT_MEDIUM
440 mtspr SPRN_SPRG1,r13
441 RUNLATCH_ON(r13)
442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
444 std r10,PACA_EXSLB+EX_R10(r13)
445 std r11,PACA_EXSLB+EX_R11(r13)
446 std r12,PACA_EXSLB+EX_R12(r13)
447 std r3,PACA_EXSLB+EX_R3(r13)
448 mfspr r9,SPRN_SPRG1
449 std r9,PACA_EXSLB+EX_R13(r13)
450 mfcr r9
451 mfspr r12,SPRN_SRR1 /* and SRR1 */
452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
453 b .do_slb_miss /* Rel. branch works in real mode */
454
455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
456 STD_EXCEPTION_PSERIES(0x600, alignment)
457 STD_EXCEPTION_PSERIES(0x700, program_check)
458 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
459 STD_EXCEPTION_PSERIES(0x900, decrementer)
460 STD_EXCEPTION_PSERIES(0xa00, trap_0a)
461 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
462
463 . = 0xc00
464 .globl system_call_pSeries
465system_call_pSeries:
466 HMT_MEDIUM
467 RUNLATCH_ON(r9)
468 mr r9,r13
469 mfmsr r10
470 mfspr r13,SPRN_SPRG3
471 mfspr r11,SPRN_SRR0
472 clrrdi r12,r13,32
473 oris r12,r12,system_call_common@h
474 ori r12,r12,system_call_common@l
475 mtspr SPRN_SRR0,r12
476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
477 mfspr r12,SPRN_SRR1
478 mtspr SPRN_SRR1,r10
479 rfid
480 b . /* prevent speculative execution */
481
482 STD_EXCEPTION_PSERIES(0xd00, single_step)
483 STD_EXCEPTION_PSERIES(0xe00, trap_0e)
484
485 /* We need to deal with the Altivec unavailable exception
486 * here which is at 0xf20, thus in the middle of the
487 * prolog code of the PerformanceMonitor one. A little
488 * trickery is thus necessary
489 */
490 . = 0xf00
491 b performance_monitor_pSeries
492
493 STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)
494
495 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
496 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
497
498 . = 0x3000
499
500/*** pSeries interrupt support ***/
501
502 /* moved from 0xf00 */
503 STD_EXCEPTION_PSERIES(., performance_monitor)
504
505 .align 7
506_GLOBAL(do_stab_bolted_pSeries)
507 mtcrf 0x80,r12
508 mfspr r12,SPRN_SPRG2
509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
510
511/*
512 * Vectors for the FWNMI option. Share common code.
513 */
514 .globl system_reset_fwnmi
515system_reset_fwnmi:
516 HMT_MEDIUM
517 mtspr SPRN_SPRG1,r13 /* save r13 */
518 RUNLATCH_ON(r13)
519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
520
521 .globl machine_check_fwnmi
522machine_check_fwnmi:
523 HMT_MEDIUM
524 mtspr SPRN_SPRG1,r13 /* save r13 */
525 RUNLATCH_ON(r13)
526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
527
528#ifdef CONFIG_PPC_ISERIES
529/*** ISeries-LPAR interrupt handlers ***/
530
531 STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC)
532
533 .globl data_access_iSeries
534data_access_iSeries:
535 mtspr SPRN_SPRG1,r13
536BEGIN_FTR_SECTION
537 mtspr SPRN_SPRG2,r12
538 mfspr r13,SPRN_DAR
539 mfspr r12,SPRN_DSISR
540 srdi r13,r13,60
541 rlwimi r13,r12,16,0x20
542 mfcr r12
543 cmpwi r13,0x2c
544 beq .do_stab_bolted_iSeries
545 mtcrf 0x80,r12
546 mfspr r12,SPRN_SPRG2
547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
549 EXCEPTION_PROLOG_ISERIES_2
550 b data_access_common
551
552.do_stab_bolted_iSeries:
553 mtcrf 0x80,r12
554 mfspr r12,SPRN_SPRG2
555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
556 EXCEPTION_PROLOG_ISERIES_2
557 b .do_stab_bolted
558
559 .globl data_access_slb_iSeries
560data_access_slb_iSeries:
561 mtspr SPRN_SPRG1,r13 /* save r13 */
562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
563 std r3,PACA_EXSLB+EX_R3(r13)
564 ld r12,PACALPPACA+LPPACASRR1(r13)
565 mfspr r3,SPRN_DAR
566 b .do_slb_miss
567
568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
569
570 .globl instruction_access_slb_iSeries
571instruction_access_slb_iSeries:
572 mtspr SPRN_SPRG1,r13 /* save r13 */
573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
574 std r3,PACA_EXSLB+EX_R3(r13)
575 ld r12,PACALPPACA+LPPACASRR1(r13)
576 ld r3,PACALPPACA+LPPACASRR0(r13)
577 b .do_slb_miss
578
579 MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
580 STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
581 STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN)
582 STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN)
583 MASKABLE_EXCEPTION_ISERIES(0x900, decrementer)
584 STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN)
585 STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN)
586
587 .globl system_call_iSeries
588system_call_iSeries:
589 mr r9,r13
590 mfspr r13,SPRN_SPRG3
591 EXCEPTION_PROLOG_ISERIES_2
592 b system_call_common
593
594 STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN)
595 STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN)
596 STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN)
597
598 .globl system_reset_iSeries
599system_reset_iSeries:
600 mfspr r13,SPRN_SPRG3 /* Get paca address */
601 mfmsr r24
602 ori r24,r24,MSR_RI
603 mtmsrd r24 /* RI on */
604 lhz r24,PACAPACAINDEX(r13) /* Get processor # */
605 cmpwi 0,r24,0 /* Are we processor 0? */
606 beq .__start_initialization_iSeries /* Start up the first processor */
607 mfspr r4,SPRN_CTRLF
608 li r5,CTRL_RUNLATCH /* Turn off the run light */
609 andc r4,r4,r5
610 mtspr SPRN_CTRLT,r4
611
6121:
613 HMT_LOW
614#ifdef CONFIG_SMP
615 lbz r23,PACAPROCSTART(r13) /* Test if this processor
616 * should start */
617 sync
618 LOADADDR(r3,current_set)
619 sldi r28,r24,3 /* get current_set[cpu#] */
620 ldx r3,r3,r28
621 addi r1,r3,THREAD_SIZE
622 subi r1,r1,STACK_FRAME_OVERHEAD
623
624 cmpwi 0,r23,0
625 beq iSeries_secondary_smp_loop /* Loop until told to go */
626 bne .__secondary_start /* Loop until told to go */
627iSeries_secondary_smp_loop:
628 /* Let the Hypervisor know we are alive */
629 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
630 lis r3,0x8002
631 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
632#else /* CONFIG_SMP */
633 /* Yield the processor. This is required for non-SMP kernels
634 which are running on multi-threaded machines. */
635 lis r3,0x8000
636 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
637 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
638 li r4,0 /* "yield timed" */
639 li r5,-1 /* "yield forever" */
640#endif /* CONFIG_SMP */
641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
642 sc /* Invoke the hypervisor via a system call */
643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
644 b 1b /* If SMP not configured, secondaries
645 * loop forever */
646
647 .globl decrementer_iSeries_masked
648decrementer_iSeries_masked:
649 li r11,1
650 stb r11,PACALPPACA+LPPACADECRINT(r13)
651 lwz r12,PACADEFAULTDECR(r13)
652 mtspr SPRN_DEC,r12
653 /* fall through */
654
655 .globl hardware_interrupt_iSeries_masked
656hardware_interrupt_iSeries_masked:
657 mtcrf 0x80,r9 /* Restore regs */
658 ld r11,PACALPPACA+LPPACASRR0(r13)
659 ld r12,PACALPPACA+LPPACASRR1(r13)
660 mtspr SPRN_SRR0,r11
661 mtspr SPRN_SRR1,r12
662 ld r9,PACA_EXGEN+EX_R9(r13)
663 ld r10,PACA_EXGEN+EX_R10(r13)
664 ld r11,PACA_EXGEN+EX_R11(r13)
665 ld r12,PACA_EXGEN+EX_R12(r13)
666 ld r13,PACA_EXGEN+EX_R13(r13)
667 rfid
668 b . /* prevent speculative execution */
669#endif /* CONFIG_PPC_ISERIES */
670
671/*** Common interrupt handlers ***/
672
673 STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
674
675 /*
676 * Machine check is different because we use a different
677 * save area: PACA_EXMC instead of PACA_EXGEN.
678 */
679 .align 7
680 .globl machine_check_common
681machine_check_common:
682 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
683 DISABLE_INTS
684 bl .save_nvgprs
685 addi r3,r1,STACK_FRAME_OVERHEAD
686 bl .machine_check_exception
687 b .ret_from_except
688
689 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
690 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
691 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
692 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
693 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
694 STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception)
695 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
696#ifdef CONFIG_ALTIVEC
697 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
698#else
699 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
700#endif
701
702/*
703 * Here we have detected that the kernel stack pointer is bad.
704 * R9 contains the saved CR, r13 points to the paca,
705 * r10 contains the (bad) kernel stack pointer,
706 * r11 and r12 contain the saved SRR0 and SRR1.
707 * We switch to using an emergency stack, save the registers there,
708 * and call kernel_bad_stack(), which panics.
709 */
710bad_stack:
711 ld r1,PACAEMERGSP(r13)
712 subi r1,r1,64+INT_FRAME_SIZE
713 std r9,_CCR(r1)
714 std r10,GPR1(r1)
715 std r11,_NIP(r1)
716 std r12,_MSR(r1)
717 mfspr r11,SPRN_DAR
718 mfspr r12,SPRN_DSISR
719 std r11,_DAR(r1)
720 std r12,_DSISR(r1)
721 mflr r10
722 mfctr r11
723 mfxer r12
724 std r10,_LINK(r1)
725 std r11,_CTR(r1)
726 std r12,_XER(r1)
727 SAVE_GPR(0,r1)
728 SAVE_GPR(2,r1)
729 SAVE_4GPRS(3,r1)
730 SAVE_2GPRS(7,r1)
731 SAVE_10GPRS(12,r1)
732 SAVE_10GPRS(22,r1)
733 addi r11,r1,INT_FRAME_SIZE
734 std r11,0(r1)
735 li r12,0
736 std r12,0(r11)
737 ld r2,PACATOC(r13)
7381: addi r3,r1,STACK_FRAME_OVERHEAD
739 bl .kernel_bad_stack
740 b 1b
741
742/*
743 * Return from an exception with minimal checks.
744 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
745 * If interrupts have been enabled, or anything has been
746 * done that might have changed the scheduling status of
747 * any task or sent any task a signal, you should use
748 * ret_from_except or ret_from_except_lite instead of this.
749 */
750 .globl fast_exception_return
751fast_exception_return:
752 ld r12,_MSR(r1)
753 ld r11,_NIP(r1)
754 andi. r3,r12,MSR_RI /* check if RI is set */
755 beq- unrecov_fer
756 ld r3,_CCR(r1)
757 ld r4,_LINK(r1)
758 ld r5,_CTR(r1)
759 ld r6,_XER(r1)
760 mtcr r3
761 mtlr r4
762 mtctr r5
763 mtxer r6
764 REST_GPR(0, r1)
765 REST_8GPRS(2, r1)
766
767 mfmsr r10
768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
769 mtmsrd r10,1
770
771 mtspr SPRN_SRR1,r12
772 mtspr SPRN_SRR0,r11
773 REST_4GPRS(10, r1)
774 ld r1,GPR1(r1)
775 rfid
776 b . /* prevent speculative execution */
777
778unrecov_fer:
779 bl .save_nvgprs
7801: addi r3,r1,STACK_FRAME_OVERHEAD
781 bl .unrecoverable_exception
782 b 1b
783
784/*
785 * Here r13 points to the paca, r9 contains the saved CR,
786 * SRR0 and SRR1 are saved in r11 and r12,
787 * r9 - r13 are saved in paca->exgen.
788 */
789 .align 7
790 .globl data_access_common
791data_access_common:
792 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
793 mfspr r10,SPRN_DAR
794 std r10,PACA_EXGEN+EX_DAR(r13)
795 mfspr r10,SPRN_DSISR
796 stw r10,PACA_EXGEN+EX_DSISR(r13)
797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
798 ld r3,PACA_EXGEN+EX_DAR(r13)
799 lwz r4,PACA_EXGEN+EX_DSISR(r13)
800 li r5,0x300
801 b .do_hash_page /* Try to handle as hpte fault */
802
803 .align 7
804 .globl instruction_access_common
805instruction_access_common:
806 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
807 ld r3,_NIP(r1)
808 andis. r4,r12,0x5820
809 li r5,0x400
810 b .do_hash_page /* Try to handle as hpte fault */
811
812 .align 7
813 .globl hardware_interrupt_common
814 .globl hardware_interrupt_entry
815hardware_interrupt_common:
816 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
817hardware_interrupt_entry:
818 DISABLE_INTS
819 addi r3,r1,STACK_FRAME_OVERHEAD
820 bl .do_IRQ
821 b .ret_from_except_lite
822
823 .align 7
824 .globl alignment_common
825alignment_common:
826 mfspr r10,SPRN_DAR
827 std r10,PACA_EXGEN+EX_DAR(r13)
828 mfspr r10,SPRN_DSISR
829 stw r10,PACA_EXGEN+EX_DSISR(r13)
830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
831 ld r3,PACA_EXGEN+EX_DAR(r13)
832 lwz r4,PACA_EXGEN+EX_DSISR(r13)
833 std r3,_DAR(r1)
834 std r4,_DSISR(r1)
835 bl .save_nvgprs
836 addi r3,r1,STACK_FRAME_OVERHEAD
837 ENABLE_INTS
838 bl .alignment_exception
839 b .ret_from_except
840
841 .align 7
842 .globl program_check_common
843program_check_common:
844 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
845 bl .save_nvgprs
846 addi r3,r1,STACK_FRAME_OVERHEAD
847 ENABLE_INTS
848 bl .program_check_exception
849 b .ret_from_except
850
851 .align 7
852 .globl fp_unavailable_common
853fp_unavailable_common:
854 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
855 bne .load_up_fpu /* if from user, just load it up */
856 bl .save_nvgprs
857 addi r3,r1,STACK_FRAME_OVERHEAD
858 ENABLE_INTS
859 bl .kernel_fp_unavailable_exception
860 BUG_OPCODE
861
862 .align 7
863 .globl altivec_unavailable_common
864altivec_unavailable_common:
865 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
866#ifdef CONFIG_ALTIVEC
867BEGIN_FTR_SECTION
868 bne .load_up_altivec /* if from user, just load it up */
869END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
870#endif
871 bl .save_nvgprs
872 addi r3,r1,STACK_FRAME_OVERHEAD
873 ENABLE_INTS
874 bl .altivec_unavailable_exception
875 b .ret_from_except
876
877#ifdef CONFIG_ALTIVEC
878/*
879 * load_up_altivec(unused, unused, tsk)
880 * Disable VMX for the task which had it previously,
881 * and save its vector registers in its thread_struct.
882 * Enables the VMX for use in the kernel on return.
883 * On SMP we know the VMX is free, since we give it up every
884 * switch (ie, no lazy save of the vector registers).
885 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
886 */
887_STATIC(load_up_altivec)
888 mfmsr r5 /* grab the current MSR */
889 oris r5,r5,MSR_VEC@h
890 mtmsrd r5 /* enable use of VMX now */
891 isync
892
893/*
894 * For SMP, we don't do lazy VMX switching because it just gets too
895 * horrendously complex, especially when a task switches from one CPU
896 * to another. Instead we call giveup_altvec in switch_to.
897 * VRSAVE isn't dealt with here, that is done in the normal context
898 * switch code. Note that we could rely on vrsave value to eventually
899 * avoid saving all of the VREGs here...
900 */
901#ifndef CONFIG_SMP
902 ld r3,last_task_used_altivec@got(r2)
903 ld r4,0(r3)
904 cmpdi 0,r4,0
905 beq 1f
906 /* Save VMX state to last_task_used_altivec's THREAD struct */
907 addi r4,r4,THREAD
908 SAVE_32VRS(0,r5,r4)
909 mfvscr vr0
910 li r10,THREAD_VSCR
911 stvx vr0,r10,r4
912 /* Disable VMX for last_task_used_altivec */
913 ld r5,PT_REGS(r4)
914 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
915 lis r6,MSR_VEC@h
916 andc r4,r4,r6
917 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9181:
919#endif /* CONFIG_SMP */
920 /* Hack: if we get an altivec unavailable trap with VRSAVE
921 * set to all zeros, we assume this is a broken application
922 * that fails to set it properly, and thus we switch it to
923 * all 1's
924 */
925 mfspr r4,SPRN_VRSAVE
926 cmpdi 0,r4,0
927 bne+ 1f
928 li r4,-1
929 mtspr SPRN_VRSAVE,r4
9301:
931 /* enable use of VMX after return */
932 ld r4,PACACURRENT(r13)
933 addi r5,r4,THREAD /* Get THREAD */
934 oris r12,r12,MSR_VEC@h
935 std r12,_MSR(r1)
936 li r4,1
937 li r10,THREAD_VSCR
938 stw r4,THREAD_USED_VR(r5)
939 lvx vr0,r10,r5
940 mtvscr vr0
941 REST_32VRS(0,r4,r5)
942#ifndef CONFIG_SMP
943 /* Update last_task_used_math to 'current' */
944 subi r4,r5,THREAD /* Back to 'current' */
945 std r4,0(r3)
946#endif /* CONFIG_SMP */
947 /* restore registers and return */
948 b fast_exception_return
949#endif /* CONFIG_ALTIVEC */
950
951/*
952 * Hash table stuff
953 */
954 .align 7
955_GLOBAL(do_hash_page)
956 std r3,_DAR(r1)
957 std r4,_DSISR(r1)
958
959 andis. r0,r4,0xa450 /* weird error? */
960 bne- .handle_page_fault /* if not, try to insert a HPTE */
961BEGIN_FTR_SECTION
962 andis. r0,r4,0x0020 /* Is it a segment table fault? */
963 bne- .do_ste_alloc /* If so handle it */
964END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
965
966 /*
967 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
968 * accessing a userspace segment (even from the kernel). We assume
969 * kernel addresses always have the high bit set.
970 */
971 rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
972 rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
973 orc r0,r12,r0 /* MSR_PR | ~high_bit */
974 rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
975 ori r4,r4,1 /* add _PAGE_PRESENT */
976 rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
977
978 /*
979 * On iSeries, we soft-disable interrupts here, then
980 * hard-enable interrupts so that the hash_page code can spin on
981 * the hash_table_lock without problems on a shared processor.
982 */
983 DISABLE_INTS
984
985 /*
986 * r3 contains the faulting address
987 * r4 contains the required access permissions
988 * r5 contains the trap number
989 *
990 * at return r3 = 0 for success
991 */
992 bl .hash_page /* build HPTE if possible */
993 cmpdi r3,0 /* see if hash_page succeeded */
994
995#ifdef DO_SOFT_DISABLE
996 /*
997 * If we had interrupts soft-enabled at the point where the
998 * DSI/ISI occurred, and an interrupt came in during hash_page,
999 * handle it now.
1000 * We jump to ret_from_except_lite rather than fast_exception_return
1001 * because ret_from_except_lite will check for and handle pending
1002 * interrupts if necessary.
1003 */
1004 beq .ret_from_except_lite
1005 /* For a hash failure, we don't bother re-enabling interrupts */
1006 ble- 12f
1007
1008 /*
1009 * hash_page couldn't handle it, set soft interrupt enable back
1010 * to what it was before the trap. Note that .local_irq_restore
1011 * handles any interrupts pending at this point.
1012 */
1013 ld r3,SOFTE(r1)
1014 bl .local_irq_restore
1015 b 11f
1016#else
1017 beq fast_exception_return /* Return from exception on success */
1018 ble- 12f /* Failure return from hash_page */
1019
1020 /* fall through */
1021#endif
1022
1023/* Here we have a page fault that hash_page can't handle. */
1024_GLOBAL(handle_page_fault)
1025 ENABLE_INTS
102611: ld r4,_DAR(r1)
1027 ld r5,_DSISR(r1)
1028 addi r3,r1,STACK_FRAME_OVERHEAD
1029 bl .do_page_fault
1030 cmpdi r3,0
1031 beq+ .ret_from_except_lite
1032 bl .save_nvgprs
1033 mr r5,r3
1034 addi r3,r1,STACK_FRAME_OVERHEAD
1035 lwz r4,_DAR(r1)
1036 bl .bad_page_fault
1037 b .ret_from_except
1038
1039/* We have a page fault that hash_page could handle but HV refused
1040 * the PTE insertion
1041 */
104212: bl .save_nvgprs
1043 addi r3,r1,STACK_FRAME_OVERHEAD
1044 lwz r4,_DAR(r1)
1045 bl .low_hash_fault
1046 b .ret_from_except
1047
1048 /* here we have a segment miss */
1049_GLOBAL(do_ste_alloc)
1050 bl .ste_allocate /* try to insert stab entry */
1051 cmpdi r3,0
1052 beq+ fast_exception_return
1053 b .handle_page_fault
1054
1055/*
1056 * r13 points to the PACA, r9 contains the saved CR,
1057 * r11 and r12 contain the saved SRR0 and SRR1.
1058 * r9 - r13 are saved in paca->exslb.
1059 * We assume we aren't going to take any exceptions during this procedure.
1060 * We assume (DAR >> 60) == 0xc.
1061 */
1062 .align 7
1063_GLOBAL(do_stab_bolted)
1064 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1065 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1066
1067 /* Hash to the primary group */
1068 ld r10,PACASTABVIRT(r13)
1069 mfspr r11,SPRN_DAR
1070 srdi r11,r11,28
1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1072
1073 /* Calculate VSID */
1074 /* This is a kernel address, so protovsid = ESID */
1075 ASM_VSID_SCRAMBLE(r11, r9)
1076 rldic r9,r11,12,16 /* r9 = vsid << 12 */
1077
1078 /* Search the primary group for a free entry */
10791: ld r11,0(r10) /* Test valid bit of the current ste */
1080 andi. r11,r11,0x80
1081 beq 2f
1082 addi r10,r10,16
1083 andi. r11,r10,0x70
1084 bne 1b
1085
1086 /* Stick for only searching the primary group for now. */
1087 /* At least for now, we use a very simple random castout scheme */
1088 /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1089 mftb r11
1090 rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1091 ori r11,r11,0x10
1092
1093 /* r10 currently points to an ste one past the group of interest */
1094 /* make it point to the randomly selected entry */
1095 subi r10,r10,128
1096 or r10,r10,r11 /* r10 is the entry to invalidate */
1097
1098 isync /* mark the entry invalid */
1099 ld r11,0(r10)
1100 rldicl r11,r11,56,1 /* clear the valid bit */
1101 rotldi r11,r11,8
1102 std r11,0(r10)
1103 sync
1104
1105 clrrdi r11,r11,28 /* Get the esid part of the ste */
1106 slbie r11
1107
11082: std r9,8(r10) /* Store the vsid part of the ste */
1109 eieio
1110
1111 mfspr r11,SPRN_DAR /* Get the new esid */
1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1113 ori r11,r11,0x90 /* Turn on valid and kp */
1114 std r11,0(r10) /* Put new entry back into the stab */
1115
1116 sync
1117
1118 /* All done -- return from exception. */
1119 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1120 ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1121
1122 andi. r10,r12,MSR_RI
1123 beq- unrecov_slb
1124
1125 mtcrf 0x80,r9 /* restore CR */
1126
1127 mfmsr r10
1128 clrrdi r10,r10,2
1129 mtmsrd r10,1
1130
1131 mtspr SPRN_SRR0,r11
1132 mtspr SPRN_SRR1,r12
1133 ld r9,PACA_EXSLB+EX_R9(r13)
1134 ld r10,PACA_EXSLB+EX_R10(r13)
1135 ld r11,PACA_EXSLB+EX_R11(r13)
1136 ld r12,PACA_EXSLB+EX_R12(r13)
1137 ld r13,PACA_EXSLB+EX_R13(r13)
1138 rfid
1139 b . /* prevent speculative execution */
1140
1141/*
1142 * r13 points to the PACA, r9 contains the saved CR,
1143 * r11 and r12 contain the saved SRR0 and SRR1.
1144 * r3 has the faulting address
1145 * r9 - r13 are saved in paca->exslb.
1146 * r3 is saved in paca->slb_r3
1147 * We assume we aren't going to take any exceptions during this procedure.
1148 */
1149_GLOBAL(do_slb_miss)
1150 mflr r10
1151
1152 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1153 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1154
1155 bl .slb_allocate /* handle it */
1156
1157 /* All done -- return from exception. */
1158
1159 ld r10,PACA_EXSLB+EX_LR(r13)
1160 ld r3,PACA_EXSLB+EX_R3(r13)
1161 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1162#ifdef CONFIG_PPC_ISERIES
1163 ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
1164#endif /* CONFIG_PPC_ISERIES */
1165
1166 mtlr r10
1167
1168 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1169 beq- unrecov_slb
1170
1171.machine push
1172.machine "power4"
1173 mtcrf 0x80,r9
1174 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1175.machine pop
1176
1177#ifdef CONFIG_PPC_ISERIES
1178 mtspr SPRN_SRR0,r11
1179 mtspr SPRN_SRR1,r12
1180#endif /* CONFIG_PPC_ISERIES */
1181 ld r9,PACA_EXSLB+EX_R9(r13)
1182 ld r10,PACA_EXSLB+EX_R10(r13)
1183 ld r11,PACA_EXSLB+EX_R11(r13)
1184 ld r12,PACA_EXSLB+EX_R12(r13)
1185 ld r13,PACA_EXSLB+EX_R13(r13)
1186 rfid
1187 b . /* prevent speculative execution */
1188
1189unrecov_slb:
1190 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1191 DISABLE_INTS
1192 bl .save_nvgprs
11931: addi r3,r1,STACK_FRAME_OVERHEAD
1194 bl .unrecoverable_exception
1195 b 1b
1196
1197/*
1198 * Space for CPU0's segment table.
1199 *
1200 * On iSeries, the hypervisor must fill in at least one entry before
1201 * we get control (with relocate on). The address is give to the hv
1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1204 * PAGE_SHIFT).
1205 */
1206 . = STAB0_PHYS_ADDR /* 0x6000 */
1207 .globl initial_stab
1208initial_stab:
1209 .space 4096
1210
1211/*
1212 * Data area reserved for FWNMI option.
1213 * This address (0x7000) is fixed by the RPA.
1214 */
1215 .= 0x7000
1216 .globl fwnmi_data_area
1217fwnmi_data_area:
1218
1219 /* iSeries does not use the FWNMI stuff, so it is safe to put
1220 * this here, even if we later allow kernels that will boot on
1221 * both pSeries and iSeries */
1222#ifdef CONFIG_PPC_ISERIES
1223 . = LPARMAP_PHYS
1224#include "lparmap.s"
1225/*
1226 * This ".text" is here for old compilers that generate a trailing
1227 * .note section when compiling .c files to .s
1228 */
1229 .text
1230#endif /* CONFIG_PPC_ISERIES */
1231
1232 . = 0x8000
1233
1234/*
1235 * On pSeries, secondary processors spin in the following code.
1236 * At entry, r3 = this processor's number (physical cpu id)
1237 */
1238_GLOBAL(pSeries_secondary_smp_init)
1239 mr r24,r3
1240
1241 /* turn on 64-bit mode */
1242 bl .enable_64b_mode
1243 isync
1244
1245 /* Copy some CPU settings from CPU 0 */
1246 bl .__restore_cpu_setup
1247
1248 /* Set up a paca value for this processor. Since we have the
1249 * physical cpu id in r24, we need to search the pacas to find
1250 * which logical id maps to our physical one.
1251 */
1252 LOADADDR(r13, paca) /* Get base vaddr of paca array */
1253 li r5,0 /* logical cpu id */
12541: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
1255 cmpw r6,r24 /* Compare to our id */
1256 beq 2f
1257 addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */
1258 addi r5,r5,1
1259 cmpwi r5,NR_CPUS
1260 blt 1b
1261
1262 mr r3,r24 /* not found, copy phys to r3 */
1263 b .kexec_wait /* next kernel might do better */
1264
12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1266 /* From now on, r24 is expected to be logical cpuid */
1267 mr r24,r5
12683: HMT_LOW
1269 lbz r23,PACAPROCSTART(r13) /* Test if this processor should */
1270 /* start. */
1271 sync
1272
1273 /* Create a temp kernel stack for use before relocation is on. */
1274 ld r1,PACAEMERGSP(r13)
1275 subi r1,r1,STACK_FRAME_OVERHEAD
1276
1277 cmpwi 0,r23,0
1278#ifdef CONFIG_SMP
1279 bne .__secondary_start
1280#endif
1281 b 3b /* Loop until told to go */
1282
1283#ifdef CONFIG_PPC_ISERIES
1284_STATIC(__start_initialization_iSeries)
1285 /* Clear out the BSS */
1286 LOADADDR(r11,__bss_stop)
1287 LOADADDR(r8,__bss_start)
1288 sub r11,r11,r8 /* bss size */
1289 addi r11,r11,7 /* round up to an even double word */
1290 rldicl. r11,r11,61,3 /* shift right by 3 */
1291 beq 4f
1292 addi r8,r8,-8
1293 li r0,0
1294 mtctr r11 /* zero this many doublewords */
12953: stdu r0,8(r8)
1296 bdnz 3b
12974:
1298 LOADADDR(r1,init_thread_union)
1299 addi r1,r1,THREAD_SIZE
1300 li r0,0
1301 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1302
1303 LOADADDR(r3,cpu_specs)
1304 LOADADDR(r4,cur_cpu_spec)
1305 li r5,0
1306 bl .identify_cpu
1307
1308 LOADADDR(r2,__toc_start)
1309 addi r2,r2,0x4000
1310 addi r2,r2,0x4000
1311
1312 bl .iSeries_early_setup
1313 bl .early_setup
1314
1315 /* relocation is on at this point */
1316
1317 b .start_here_common
1318#endif /* CONFIG_PPC_ISERIES */
1319
1320#ifdef CONFIG_PPC_MULTIPLATFORM
1321
1322_STATIC(__mmu_off)
1323 mfmsr r3
1324 andi. r0,r3,MSR_IR|MSR_DR
1325 beqlr
1326 andc r3,r3,r0
1327 mtspr SPRN_SRR0,r4
1328 mtspr SPRN_SRR1,r3
1329 sync
1330 rfid
1331 b . /* prevent speculative execution */
1332
1333
1334/*
1335 * Here is our main kernel entry point. We support currently 2 kind of entries
1336 * depending on the value of r5.
1337 *
1338 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
1339 * in r3...r7
1340 *
1341 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
1342 * DT block, r4 is a physical pointer to the kernel itself
1343 *
1344 */
1345_GLOBAL(__start_initialization_multiplatform)
1346 /*
1347 * Are we booted from a PROM Of-type client-interface ?
1348 */
1349 cmpldi cr0,r5,0
1350 bne .__boot_from_prom /* yes -> prom */
1351
1352 /* Save parameters */
1353 mr r31,r3
1354 mr r30,r4
1355
1356 /* Make sure we are running in 64 bits mode */
1357 bl .enable_64b_mode
1358
1359 /* Setup some critical 970 SPRs before switching MMU off */
1360 bl .__970_cpu_preinit
1361
1362 /* cpu # */
1363 li r24,0
1364
1365 /* Switch off MMU if not already */
1366 LOADADDR(r4, .__after_prom_start - KERNELBASE)
1367 add r4,r4,r30
1368 bl .__mmu_off
1369 b .__after_prom_start
1370
1371_STATIC(__boot_from_prom)
1372 /* Save parameters */
1373 mr r31,r3
1374 mr r30,r4
1375 mr r29,r5
1376 mr r28,r6
1377 mr r27,r7
1378
1379 /* Make sure we are running in 64 bits mode */
1380 bl .enable_64b_mode
1381
1382 /* put a relocation offset into r3 */
1383 bl .reloc_offset
1384
1385 LOADADDR(r2,__toc_start)
1386 addi r2,r2,0x4000
1387 addi r2,r2,0x4000
1388
1389 /* Relocate the TOC from a virt addr to a real addr */
1390 add r2,r2,r3
1391
1392 /* Restore parameters */
1393 mr r3,r31
1394 mr r4,r30
1395 mr r5,r29
1396 mr r6,r28
1397 mr r7,r27
1398
1399 /* Do all of the interaction with OF client interface */
1400 bl .prom_init
1401 /* We never return */
1402 trap
1403
1404/*
1405 * At this point, r3 contains the physical address we are running at,
1406 * returned by prom_init()
1407 */
1408_STATIC(__after_prom_start)
1409
1410/*
1411 * We need to run with __start at physical address 0.
1412 * This will leave some code in the first 256B of
1413 * real memory, which are reserved for software use.
1414 * The remainder of the first page is loaded with the fixed
1415 * interrupt vectors. The next two pages are filled with
1416 * unknown exception placeholders.
1417 *
1418 * Note: This process overwrites the OF exception vectors.
1419 * r26 == relocation offset
1420 * r27 == KERNELBASE
1421 */
1422 bl .reloc_offset
1423 mr r26,r3
1424 SET_REG_TO_CONST(r27,KERNELBASE)
1425
1426 li r3,0 /* target addr */
1427
1428 // XXX FIXME: Use phys returned by OF (r30)
1429 add r4,r27,r26 /* source addr */
1430 /* current address of _start */
1431 /* i.e. where we are running */
1432 /* the source addr */
1433
1434 LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */
1435 sub r5,r5,r27
1436
1437 li r6,0x100 /* Start offset, the first 0x100 */
1438 /* bytes were copied earlier. */
1439
	/* First copy only up to copy_to_here, which covers this very
	 * routine, so that we can then jump into the relocated copy. */
1440 bl .copy_and_flush /* copy the first n bytes */
1441 /* this includes the code being */
1442 /* executed here. */
1443
1444 LOADADDR(r0, 4f) /* Jump to the copy of this code */
1445 mtctr r0 /* that we just made/relocated */
1446 bctr
1447
	/* Now running in the copy at physical 0: copy the remainder of
	 * the kernel (up to klimit) and continue with boot. */
14484: LOADADDR(r5,klimit)
1449 add r5,r5,r26
1450 ld r5,0(r5) /* get the value of klimit */
1451 sub r5,r5,r27
1452 bl .copy_and_flush /* copy the rest */
1453 b .start_here_multiplatform
1454
1455#endif /* CONFIG_PPC_MULTIPLATFORM */
1456
1457/*
1458 * Copy routine used to copy the kernel to start at physical address 0
1459 * and flush and invalidate the caches as needed.
1460 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1461 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1462 *
1463 * Note: this routine *only* clobbers r0, r6 and lr
1464 */
1465_GLOBAL(copy_and_flush)
	/* Pre-bias limit and offset by -8: the inner loop pre-increments
	 * r6 by 8 before each doubleword load/store. */
1466 addi r5,r5,-8
1467 addi r6,r6,-8
14684: li r0,16 /* Use the least common */
1469 /* denominator cache line */
1470 /* size. This results in */
1471 /* extra cache line flushes */
1472 /* but operation is correct. */
1473 /* Can't get cache line size */
1474 /* from NACA as it is being */
1475 /* moved too. */
1476
1477 mtctr r0 /* put # words/line in ctr */
14783: addi r6,r6,8 /* copy a cache line */
1479 ldx r0,r6,r4
1480 stdx r0,r6,r3
1481 bdnz 3b
	/* Push the freshly written line to memory and invalidate the
	 * corresponding icache line so the copied code can be executed. */
1482 dcbst r6,r3 /* write it to memory */
1483 sync
1484 icbi r6,r3 /* flush the icache line */
1485 cmpld 0,r6,r5
1486 blt 4b
1487 sync
	/* Undo the initial bias so r6 exits >= the original r5. */
1488 addi r5,r5,8
1489 addi r6,r6,8
1490 blr
1491
1492.align 8
1493copy_to_here:
1494
1495#ifdef CONFIG_SMP
1496#ifdef CONFIG_PPC_PMAC
1497/*
1498 * On PowerMac, secondary processors starts from the reset vector, which
1499 * is temporarily turned into a call to one of the functions below.
1500 */
1501 .section ".text";
1502 .align 2 ;
1503
1504 .globl __secondary_start_pmac_0
1505__secondary_start_pmac_0:
1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	/* Each 8-byte slot loads its cpu number into r24 then falls
	 * through to the common code at 1: (cpu 3 needs no branch). */
1507 li r24,0
1508 b 1f
1509 li r24,1
1510 b 1f
1511 li r24,2
1512 b 1f
1513 li r24,3
15141:
1515
/*
 * Common PowerMac secondary-CPU bringup: r24 = cpu number.
 * Enables 64-bit mode, restores CPU setup from CPU 0, points SPRG3 at
 * this cpu's paca and gives it a temporary real-mode stack.
 */
1516_GLOBAL(pmac_secondary_start)
1517 /* turn on 64-bit mode */
1518 bl .enable_64b_mode
1519 isync
1520
1521 /* Copy some CPU settings from CPU 0 */
1522 bl .__restore_cpu_setup
1523
1524 /* pSeries do that early though I don't think we really need it */
1525 mfmsr r3
1526 ori r3,r3,MSR_RI
1527 mtmsrd r3 /* RI on */
1528
1529 /* Set up a paca value for this processor. */
1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1532 add r13,r13,r4 /* for this processor. */
1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1534
1535 /* Create a temp kernel stack for use before relocation is on. */
1536 ld r1,PACAEMERGSP(r13)
1537 subi r1,r1,STACK_FRAME_OVERHEAD
1538
1539 b .__secondary_start
1540
1541#endif /* CONFIG_PPC_PMAC */
1542
1543/*
1544 * This function is called after the master CPU has released the
1545 * secondary processors. The execution environment is relocation off.
1546 * The paca for this processor has the following fields initialized at
1547 * this point:
1548 * 1. Processor number
1549 * 2. Segment table pointer (virtual address)
1550 * On entry the following are set:
1551 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries
1552 * r24 = cpu# (in Linux terms)
1553 * r13 = paca virtual address
1554 * SPRG3 = paca virtual address
1555 */
1556_GLOBAL(__secondary_start)
1557
1558 HMT_MEDIUM /* Set thread priority to MEDIUM */
1559
	/* Pick up the kernel TOC from the paca and mark this cpu as
	 * (soft-)disabled until start_secondary enables it. */
1560 ld r2,PACATOC(r13)
1561 li r6,0
1562 stb r6,PACAPROCENABLED(r13)
1563
1564#ifndef CONFIG_PPC_ISERIES
1565 /* Initialize the page table pointer register. */
1566 LOADADDR(r6,_SDR1)
1567 ld r6,0(r6) /* get the value of _SDR1 */
1568 mtspr SPRN_SDR1,r6 /* set the htab location */
1569#endif
1570 /* Initialize the first segment table (or SLB) entry */
1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
1572 bl .stab_initialize
1573
1574 /* Initialize the kernel stack. Just a repeat for iSeries. */
1575 LOADADDR(r3,current_set)
1576 sldi r28,r24,3 /* get current_set[cpu#] */
1577 ldx r1,r3,r28
1578 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
1579 std r1,PACAKSAVE(r13)
1580
1581 ld r3,PACASTABREAL(r13) /* get raddr of segment table */
1582 ori r4,r3,1 /* turn on valid bit */
1583
	/* Point the hardware (ASR) at the segment table: via hypervisor
	 * call on iSeries/LPAR machines, directly via mtasr otherwise.
	 * PVR 0x34/0x36/0x37 (Pulsar/IStar/SStar) use the H_SET_ASR hcall. */
1584#ifdef CONFIG_PPC_ISERIES
1585 li r0,-1 /* hypervisor call */
1586 li r3,1
1587 sldi r3,r3,63 /* 0x8000000000000000 */
1588 ori r3,r3,4 /* 0x8000000000000004 */
1589 sc /* HvCall_setASR */
1590#else
1591 /* set the ASR */
1592 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1593 ld r3,0(r3)
1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1596 beq 98f /* branch if result is 0 */
1597 mfspr r3,SPRN_PVR
1598 srwi r3,r3,16
1599 cmpwi r3,0x37 /* SStar */
1600 beq 97f
1601 cmpwi r3,0x36 /* IStar */
1602 beq 97f
1603 cmpwi r3,0x34 /* Pulsar */
1604 bne 98f
160597: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1606 HVSC /* Invoking hcall */
1607 b 99f
160898: /* !(rpa hypervisor) || !(star) */
1609 mtasr r4 /* set the stab location */
161099:
1611#endif
1612 li r7,0
1613 mtlr r7
1614
1615 /* enable MMU and jump to start_secondary */
1616 LOADADDR(r3,.start_secondary_prolog)
1617 SET_REG_TO_CONST(r4, MSR_KERNEL)
1618#ifdef DO_SOFT_DISABLE
1619 ori r4,r4,MSR_EE
1620#endif
	/* rfid with SRR0/SRR1 loaded turns relocation on and lands in
	 * start_secondary_prolog; the trailing b . is never reached. */
1621 mtspr SPRN_SRR0,r3
1622 mtspr SPRN_SRR1,r4
1623 rfid
1624 b . /* prevent speculative execution */
1625
1626/*
1627 * Running with relocation on at this point. All we want to do is
1628 * zero the stack back-chain pointer before going into C code.
1629 */
1630_GLOBAL(start_secondary_prolog)
1631 li r3,0
1632 std r3,0(r1) /* Zero the stack frame pointer */
1633 bl .start_secondary
1634#endif
1635
1636/*
1637 * This subroutine clobbers r11 and r12
1638 */
/*
 * Set MSR[SF] (64-bit mode) and MSR[ISF] (64-bit interrupt mode) in the
 * current MSR. Each bit is built with li+rldicr because the bit positions
 * are above the 16-bit immediate range.
 */
1639_GLOBAL(enable_64b_mode)
1640 mfmsr r11 /* grab the current MSR */
1641 li r12,1
1642 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1643 or r11,r11,r12
1644 li r12,1
1645 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1646 or r11,r11,r12
1647 mtmsrd r11
1648 isync
1649 blr
1650
1651#ifdef CONFIG_PPC_MULTIPLATFORM
1652/*
1653 * This is where the main kernel code starts.
1654 */
/*
 * Real-mode entry after the kernel has been copied to physical 0.
 * Clears the BSS, builds a physical stack/TOC, identifies the CPU,
 * runs early_setup(), points ASR/SDR1 at the stab/htab, and finally
 * rfid's into start_here_common with relocation on.
 * r26 = relocation offset, r31 = boot parameter saved earlier.
 */
1655_STATIC(start_here_multiplatform)
1656 /* get a new offset, now that the kernel has moved. */
1657 bl .reloc_offset
1658 mr r26,r3
1659
1660 /* Clear out the BSS. It may have been done in prom_init,
1661 * already but that's irrelevant since prom_init will soon
1662 * be detached from the kernel completely. Besides, we need
1663 * to clear it now for kexec-style entry.
1664 */
1665 LOADADDR(r11,__bss_stop)
1666 LOADADDR(r8,__bss_start)
1667 sub r11,r11,r8 /* bss size */
1668 addi r11,r11,7 /* round up to an even double word */
1669 rldicl. r11,r11,61,3 /* shift right by 3 */
1670 beq 4f
1671 addi r8,r8,-8
1672 li r0,0
1673 mtctr r11 /* zero this many doublewords */
16743: stdu r0,8(r8)
1675 bdnz 3b
16764:
1677
1678 mfmsr r6
1679 ori r6,r6,MSR_RI
1680 mtmsrd r6 /* RI on */
1681
1682#ifdef CONFIG_HMT
1683 /* Start up the second thread on cpu 0 */
1684 mfspr r3,SPRN_PVR
1685 srwi r3,r3,16
1686 cmpwi r3,0x34 /* Pulsar */
1687 beq 90f
1688 cmpwi r3,0x36 /* Icestar */
1689 beq 90f
1690 cmpwi r3,0x37 /* SStar */
1691 beq 90f
1692 b 91f /* HMT not supported */
169390: li r3,0
1694 bl .hmt_start_secondary
169591:
1696#endif
1697
1698 /* The following gets the stack and TOC set up with the regs */
1699 /* pointing to the real addr of the kernel stack. This is */
1700 /* all done to support the C function call below which sets */
1701 /* up the htab. This is done because we have relocated the */
1702 /* kernel but are still running in real mode. */
1703
1704 LOADADDR(r3,init_thread_union)
1705 add r3,r3,r26
1706
1707 /* set up a stack pointer (physical address) */
1708 addi r1,r3,THREAD_SIZE
1709 li r0,0
1710 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1711
	/* TOC = __toc_start + 0x8000 (64-bit ELF ABI bias, split into
	 * two addi), then converted to a physical address. */
1712 /* set up the TOC (physical address) */
1713 LOADADDR(r2,__toc_start)
1714 addi r2,r2,0x4000
1715 addi r2,r2,0x4000
1716 add r2,r2,r26
1717
1718 LOADADDR(r3,cpu_specs)
1719 add r3,r3,r26
1720 LOADADDR(r4,cur_cpu_spec)
1721 add r4,r4,r26
1722 mr r5,r26
1723 bl .identify_cpu
1724
1725 /* Save some low level config HIDs of CPU0 to be copied to
1726 * other CPUs later on, or used for suspend/resume
1727 */
1728 bl .__save_cpu_setup
1729 sync
1730
1731 /* Setup a valid physical PACA pointer in SPRG3 for early_setup
1732 * note that boot_cpuid can always be 0 nowadays since there is
1733 * nowhere it can be initialized differently before we reach this
1734 * code
1735 */
1736 LOADADDR(r27, boot_cpuid)
1737 add r27,r27,r26
1738 lwz r27,0(r27)
1739
1740 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1742 add r13,r13,r24 /* for this processor. */
1743 add r13,r13,r26 /* convert to physical addr */
1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1745
1746 /* Do very early kernel initializations, including initial hash table,
1747 * stab and slb setup before we turn on relocation. */
1748
1749 /* Restore parameters passed from prom_init/kexec */
1750 mr r3,r31
1751 bl .early_setup
1752
	/* Point the ASR at the real-mode segment table: H_SET_ASR hcall
	 * on LPAR machines with PVR 0x34/0x36/0x37, plain mtasr otherwise.
	 * Same dispatch pattern as in __secondary_start above. */
1753 /* set the ASR */
1754 ld r3,PACASTABREAL(r13)
1755 ori r4,r3,1 /* turn on valid bit */
1756 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1757 ld r3,0(r3)
1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1760 beq 98f /* branch if result is 0 */
1761 mfspr r3,SPRN_PVR
1762 srwi r3,r3,16
1763 cmpwi r3,0x37 /* SStar */
1764 beq 97f
1765 cmpwi r3,0x36 /* IStar */
1766 beq 97f
1767 cmpwi r3,0x34 /* Pulsar */
1768 bne 98f
176997: li r3,H_SET_ASR /* hcall = H_SET_ASR */
1770 HVSC /* Invoking hcall */
1771 b 99f
177298: /* !(rpa hypervisor) || !(star) */
1773 mtasr r4 /* set the stab location */
177499:
1775 /* Set SDR1 (hash table pointer) */
1776 ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
1777 ld r3,0(r3)
1778 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1779 /* Test if bit 0 is set (LPAR bit) */
1780 andi. r3,r3,PLATFORM_LPAR
1781 bne 98f /* branch if result is !0 */
1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1783 add r6,r6,r26
1784 ld r6,0(r6) /* get the value of _SDR1 */
1785 mtspr SPRN_SDR1,r6 /* set the htab location */
178698:
	/* Turn on relocation and continue in start_here_common. */
1787 LOADADDR(r3,.start_here_common)
1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1789 mtspr SPRN_SRR0,r3
1790 mtspr SPRN_SRR1,r4
1791 rfid
1792 b . /* prevent speculative execution */
1793#endif /* CONFIG_PPC_MULTIPLATFORM */
1794
 /* This is where all platforms converge execution */
/*
 * First code run with address translation enabled: rebuilds the stack
 * and TOC as virtual addresses, applies CPU-feature fixups, installs
 * the boot cpu's paca/current, then calls setup_system and start_kernel.
 */
1796_STATIC(start_here_common)
1797 /* relocation is on at this point */
1798
1799 /* The following code sets up the SP and TOC now that we are */
1800 /* running with translation enabled. */
1801
1802 LOADADDR(r3,init_thread_union)
1803
1804 /* set up the stack */
1805 addi r1,r3,THREAD_SIZE
1806 li r0,0
1807 stdu r0,-STACK_FRAME_OVERHEAD(r1)
1808
1809 /* Apply the CPUs-specific fixups (nop out sections not relevant
1810 * to this CPU
1811 */
1812 li r3,0
1813 bl .do_cpu_ftr_fixups
1814
1815 LOADADDR(r26, boot_cpuid)
1816 lwz r26,0(r26)
1817
1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1820 add r13,r13,r24 /* for this processor. */
1821 mtspr SPRN_SPRG3,r13
1822
1823 /* ptr to current */
1824 LOADADDR(r4,init_task)
1825 std r4,PACACURRENT(r13)
1826
1827 /* Load the TOC */
1828 ld r2,PACATOC(r13)
1829 std r1,PACAKSAVE(r13)
1830
1831 bl .setup_system
1832
1833 /* Load up the kernel context */
18345:
1835#ifdef DO_SOFT_DISABLE
	/* Soft-disable interrupts in the paca but hard-enable MSR[EE];
	 * the soft-disable flag is honored by the interrupt entry code. */
1836 li r5,0
1837 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
1838 mfmsr r5
1839 ori r5,r5,MSR_EE /* Hard Enabled */
1840 mtmsrd r5
1841#endif
1842
1843 bl .start_kernel
1844
/*
 * Hardware multithreading bringup. With CONFIG_HMT, record this thread's
 * physical processor id (from PIR, masked per PVR family) into
 * hmt_thread_data[cpu] and kick the sibling thread; a secondary thread
 * entering via __hmt_secondary_hold looks up its own PIR in that table
 * to recover its Linux cpu number (r24). All paths fall through to
 * pSeries_secondary_smp_init with r3 = cpu#.
 */
1845_GLOBAL(hmt_init)
1846#ifdef CONFIG_HMT
1847 LOADADDR(r5, hmt_thread_data)
1848 mfspr r7,SPRN_PVR
1849 srwi r7,r7,16
1850 cmpwi r7,0x34 /* Pulsar */
1851 beq 90f
1852 cmpwi r7,0x36 /* Icestar */
1853 beq 91f
1854 cmpwi r7,0x37 /* SStar */
1855 beq 91f
1856 b 101f
	/* Pulsar uses a 5-bit PIR field; IStar/SStar use 10 bits. */
185790: mfspr r6,SPRN_PIR
1858 andi. r6,r6,0x1f
1859 b 92f
186091: mfspr r6,SPRN_PIR
1861 andi. r6,r6,0x3ff
	/* hmt_thread_data[r24] = physical thread id (8-byte entries) */
186292: sldi r4,r24,3
1863 stwx r6,r5,r4
1864 bl .hmt_start_secondary
1865 b 101f
1866
__hmt_secondary_hold:
1868 LOADADDR(r5, hmt_thread_data)
1869 clrldi r5,r5,4
1870 li r7,0
1871 mfspr r6,SPRN_PIR
1872 mfspr r8,SPRN_PVR
1873 srwi r8,r8,16
1874 cmpwi r8,0x34
1875 bne 93f
1876 andi. r6,r6,0x1f
1877 b 103f
187893: andi. r6,r6,0x3f
1879
	/* Linear search of hmt_thread_data for our PIR value... */
103: lwzx r8,r5,r7
1881 cmpw r8,r6
1882 beq 104f
1883 addi r7,r7,8
1884 b 103b
1885
	/* ...found: the second word of the entry is our cpu number. */
104: addi r7,r7,4
1887 lwzx r9,r5,r7
1888 mr r24,r9
1889101:
1890#endif
1891 mr r3,r24
1892 b .pSeries_secondary_smp_init
1893
#ifdef CONFIG_HMT
/*
 * Wake the second hardware thread: point NIADORM at
 * __hmt_secondary_hold, program dormancy/timing SPRs, and set the
 * run bits in HID0/CTRL. Clobbers r4 and r5.
 * NOTE(review): the magic TSC/TST values appear to be chip-specific
 * dormancy settings -- confirm against the processor manual.
 */
1895_GLOBAL(hmt_start_secondary)
1896 LOADADDR(r4,__hmt_secondary_hold)
1897 clrldi r4,r4,4
1898 mtspr SPRN_NIADORM, r4
1899 mfspr r4, SPRN_MSRDORM
1900 li r5, -65
1901 and r4, r4, r5
1902 mtspr SPRN_MSRDORM, r4
1903 lis r4,0xffef
1904 ori r4,r4,0x7403
1905 mtspr SPRN_TSC, r4
1906 li r4,0x1f4
1907 mtspr SPRN_TST, r4
1908 mfspr r4, SPRN_HID0
1909 ori r4, r4, 0x1
1910 mtspr SPRN_HID0, r4
1911 mfspr r4, SPRN_CTRLF
1912 oris r4, r4, 0x40
1913 mtspr SPRN_CTRLT, r4
1914 blr
#endif
1916
#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1918_GLOBAL(smp_release_cpus)
1919 /* All secondary cpus are spinning on a common
1920 * spinloop, release them all now so they can start
1921 * to spin on their individual paca spinloops.
1922 * For non SMP kernels, the secondary cpus never
1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1926 */
	/* Store 1 into __secondary_hold_spinloop; the sync makes the
	 * store visible to the spinning secondaries before we return. */
1927 li r3,1
1928 LOADADDR(r5,__secondary_hold_spinloop)
1929 std r3,0(r5)
1930 sync
1931 blr
#endif /* CONFIG_SMP */
1933
1934
1935/*
1936 * We put a few things here that have to be page-aligned.
1937 * This stuff goes at the beginning of the bss, which is page-aligned.
1938 */
1939 .section ".bss"
1940
1941 .align PAGE_SHIFT
1942
1943 .globl empty_zero_page
1944empty_zero_page:
1945 .space PAGE_SIZE
1946
1947 .globl swapper_pg_dir
1948swapper_pg_dir:
1949 .space PAGE_SIZE
1950
1951/*
1952 * This space gets a copy of optional info passed to us by the bootstrap
1953 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1954 */
1955 .globl cmd_line
1956cmd_line:
1957 .space COMMAND_LINE_SIZE
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
new file mode 100644
index 000000000000..bc6d1ac55235
--- /dev/null
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -0,0 +1,860 @@
1/*
2 * arch/ppc/kernel/except_8xx.S
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Low-level exception handlers and MMU support
9 * rewritten by Paul Mackerras.
10 * Copyright (C) 1996 Paul Mackerras.
11 * MPC8xx modifications by Dan Malek
12 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains low-level support and setup for PowerPC 8xx
15 * embedded processors, including trap and interrupt dispatch.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <asm/processor.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cache.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/thread_info.h>
32#include <asm/ppc_asm.h>
33#include <asm/asm-offsets.h>
34
35/* Macro to make the code more readable.
 * CPU6-errata workaround: when CONFIG_8xx_CPU6 is set, a special value
 * is stored to and re-read from memory location 12(r0) before the
 * following mtspr; otherwise the macro expands to nothing. */
36#ifdef CONFIG_8xx_CPU6
37#define DO_8xx_CPU6(val, reg) \
38 li reg, val; \
39 stw reg, 12(r0); \
40 lwz reg, 12(r0);
41#else
42#define DO_8xx_CPU6(val, reg)
43#endif
44 .text
45 .globl _stext
46_stext:
47 .text
48 .globl _start
49_start:
50
51/* MPC8xx
52 * This port was done on an MBX board with an 860. Right now I only
53 * support an ELF compressed (zImage) boot from EPPC-Bug because the
54 * code there loads up some registers before calling us:
55 * r3: ptr to board info data
56 * r4: initrd_start or if no initrd then 0
57 * r5: initrd_end - unused if r4 is 0
58 * r6: Start of command line string
59 * r7: End of command line string
60 *
61 * I decided to use conditional compilation instead of checking PVR and
62 * adding more processor specific branches around code I don't need.
63 * Since this is an embedded processor, I also appreciate any memory
64 * savings I can get.
65 *
66 * The MPC8xx does not have any BATs, but it supports large page sizes.
67 * We first initialize the MMU to support 8M byte pages, then load one
68 * entry into each of the instruction and data TLBs to map the first
69 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
70 * the "internal" processor registers before MMU_init is called.
71 *
72 * The TLB code currently contains a major hack. Since I use the condition
73 * code register, I have to save and restore it. I am out of registers, so
74 * I just store it in memory location 0 (the TLB handlers are not reentrant).
75 * To avoid making any decisions, I need to use the "segment" valid bit
76 * in the first level table, but that would require many changes to the
77 * Linux page directory/table functions that I don't want to do right now.
78 *
79 * I used to use SPRG2 for a temporary register in the TLB handler, but it
80 * has since been put to other uses. I now use a hack to save a register
81 * and the CCR at memory location 0.....Someday I'll fix this.....
82 * -- Dan
83 */
	/* 8xx kernel entry: r3..r7 carry the bootloader parameters
	 * documented in the header comment above. */
84 .globl __start
__start:
86 mr r31,r3 /* save parameters */
87 mr r30,r4
88 mr r29,r5
89 mr r28,r6
90 mr r27,r7
91
92 /* We have to turn on the MMU right away so we get cache modes
93 * set correctly.
94 */
95 bl initial_mmu
96
97/* We now have the lower 8 Meg mapped into TLB entries, and the caches
98 * ready to work.
99 */
100
turn_on_mmu:
	/* Enable translation via rfi: SRR1 = MSR with IR/DR set,
	 * SRR0 = start_here, so the rfi lands there with the MMU on. */
102 mfmsr r0
103 ori r0,r0,MSR_DR|MSR_IR
104 mtspr SPRN_SRR1,r0
105 lis r0,start_here@h
106 ori r0,r0,start_here@l
107 mtspr SPRN_SRR0,r0
108 SYNC
109 rfi /* enables MMU */
110
111/*
112 * Exception entry code. This code runs with address translation
113 * turned off, i.e. using physical addresses.
114 * We assume sprg3 has the physical address of the current
115 * task's thread_struct.
116 */
/* Save r10/r11 in SPRG0/SPRG1, capture CR, then build the exception
 * frame (PROLOG_1 picks the stack, PROLOG_2 fills the frame). */
117#define EXCEPTION_PROLOG \
118 mtspr SPRN_SPRG0,r10; \
119 mtspr SPRN_SPRG1,r11; \
120 mfcr r10; \
121 EXCEPTION_PROLOG_1; \
122 EXCEPTION_PROLOG_2
123
/* Choose the stack: if we came from the kernel (SRR1[PR] clear) reuse
 * r1 (tophys); if from user space take the task's kernel stack from the
 * thread_info pointed to by SPRG3. Leaves r11 = physical frame base. */
124#define EXCEPTION_PROLOG_1 \
125 mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
126 andi. r11,r11,MSR_PR; \
127 tophys(r11,r1); /* use tophys(r1) if kernel */ \
128 beq 1f; \
129 mfspr r11,SPRN_SPRG3; \
130 lwz r11,THREAD_INFO-THREAD(r11); \
131 addi r11,r11,THREAD_SIZE; \
132 tophys(r11,r11); \
1331: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
134
135
/* Fill the frame: CR, GPRs, LR, SRR0/SRR1 (left in r12/r9 for the
 * handler), switch r1 to the new kernel sp, and re-enable machine
 * check by setting a translation-off kernel MSR. */
136#define EXCEPTION_PROLOG_2 \
137 CLR_TOP32(r11); \
138 stw r10,_CCR(r11); /* save registers */ \
139 stw r12,GPR12(r11); \
140 stw r9,GPR9(r11); \
141 mfspr r10,SPRN_SPRG0; \
142 stw r10,GPR10(r11); \
143 mfspr r12,SPRN_SPRG1; \
144 stw r12,GPR11(r11); \
145 mflr r10; \
146 stw r10,_LINK(r11); \
147 mfspr r12,SPRN_SRR0; \
148 mfspr r9,SPRN_SRR1; \
149 stw r1,GPR1(r11); \
150 stw r1,0(r11); \
151 tovirt(r1,r11); /* set new kernel sp */ \
152 li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
153 MTMSRD(r10); /* (except for mach check in rtas) */ \
154 stw r0,GPR0(r11); \
155 SAVE_4GPRS(3, r11); \
156 SAVE_2GPRS(7, r11)
157
158/*
159 * Note: code which follows this uses cr0.eq (set if from kernel),
160 * r11, r12 (SRR0), and r9 (SRR1).
161 *
162 * Note2: once we have set r1 we are in a position to take exceptions
163 * again, and we could thus set MSR:RI at that point.
164 */
165
166/*
167 * Exception vectors.
168 */
/* Emit a vector at fixed offset n: run the prolog, point r3 at the
 * saved registers, and transfer to the C handler via xfer. */
169#define EXCEPTION(n, label, hdlr, xfer) \
170 . = n; \
label: \
172 EXCEPTION_PROLOG; \
173 addi r3,r1,STACK_FRAME_OVERHEAD; \
174 xfer(n, hdlr)
175
/* Common transfer: record the trap number in the frame, build the
 * handler MSR (copyee optionally propagates the interrupted EE bit
 * from SRR1), then branch to the transfer routine, which reads the
 * handler and return addresses from the two .long words following. */
176#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
177 li r10,trap; \
178 stw r10,_TRAP(r11); \
179 li r10,MSR_KERNEL; \
180 copyee(r10, r9); \
181 bl tfer; \
i##n: \
183 .long hdlr; \
184 .long ret
185
186#define COPY_EE(d, s) rlwimi d,s,0,16,16
187#define NOCOPY(d, s)
188
/* Four flavors: STD/LITE pick the full vs. light transfer path;
 * the EE variants additionally copy the interrupted MSR[EE]. The
 * LITE variants use trap number n+1 to mark the lighter exit path. */
189#define EXC_XFER_STD(n, hdlr) \
190 EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
191 ret_from_except_full)
192
193#define EXC_XFER_LITE(n, hdlr) \
194 EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
195 ret_from_except)
196
197#define EXC_XFER_EE(n, hdlr) \
198 EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
199 ret_from_except_full)
200
201#define EXC_XFER_EE_LITE(n, hdlr) \
202 EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
203 ret_from_except)
204
205/* System reset */
206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207
208/* Machine check */
209 . = 0x200
MachineCheck:
211 EXCEPTION_PROLOG
	/* Save the fault address and cause for the C handler. */
212 mfspr r4,SPRN_DAR
213 stw r4,_DAR(r11)
214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, machine_check_exception)
218
219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other
221 * translation errors.
222 */
223 . = 0x300
DataAccess:
225 EXCEPTION_PROLOG
	/* r4 = fault address (DAR), r5 = fault cause (DSISR), as
	 * expected by handle_page_fault. */
226 mfspr r10,SPRN_DSISR
227 stw r10,_DSISR(r11)
228 mr r5,r10
229 mfspr r4,SPRN_DAR
230 EXC_XFER_EE_LITE(0x300, handle_page_fault)
231
232/* Instruction access exception.
233 * This is "never generated" by the MPC8xx. We jump to it for other
234 * translation errors.
235 */
236 . = 0x400
InstructionAccess:
238 EXCEPTION_PROLOG
	/* r4 = faulting PC (saved SRR0), r5 = saved SRR1 fault bits. */
239 mr r4,r12
240 mr r5,r9
241 EXC_XFER_EE_LITE(0x400, handle_page_fault)
242
243/* External interrupt */
244 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
245
246/* Alignment exception */
247 . = 0x600
Alignment:
249 EXCEPTION_PROLOG
250 mfspr r4,SPRN_DAR
251 stw r4,_DAR(r11)
252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, alignment_exception)
256
257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259
260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/
262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263
264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266
267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269
270/* System call */
271 . = 0xc00
SystemCall:
273 EXCEPTION_PROLOG
274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275
276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions.
283 */
284 EXCEPTION(0x1000, SoftEmu, SoftwareEmulation, EXC_XFER_STD)
285
286 . = 0x1100
287/*
288 * For the MPC8xx, this is a software tablewalk to load the instruction
289 * TLB. It is modelled after the example in the Motorola manual. The task
290 * switch loads the M_TWB register with the pointer to the first level table.
291 * If we discover there is no second level table (value is zero) or if there
292 * is an invalid pte, we load that into the TLB, which causes another fault
293 * into the TLB Error interrupt where we can handle such problems.
294 * We have to use the MD_xxx registers for the tablewalk because the
295 * equivalent MI_xxx registers only perform the attribute functions.
296 */
InstructionTLBMiss:
298#ifdef CONFIG_8xx_CPU6
299 stw r3, 8(r0)
300#endif
301 DO_8xx_CPU6(0x3f80, r3)
	/* Scratch r10 in M_TW; CR and r11 go to memory locations 0 and 4
	 * (this handler is not reentrant -- see the file header). */
302 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
303 mfcr r10
304 stw r10, 0(r0)
305 stw r11, 4(r0)
306 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
307 DO_8xx_CPU6(0x3780, r3)
308 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
309 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
310
311 /* If we are faulting a kernel address, we have to use the
312 * kernel page tables.
313 */
314 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */
315 beq 3f
316 lis r11, swapper_pg_dir@h
317 ori r11, r11, swapper_pg_dir@l
318 rlwimi r10, r11, 0, 2, 19
3193:
320 lwz r11, 0(r10) /* Get the level 1 entry */
321 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
322 beq 2f /* If zero, don't try to find a pte */
323
324 /* We have a pte table, so load the MI_TWC with the attributes
325 * for this "segment."
326 */
327 ori r11,r11,1 /* Set valid bit */
328 DO_8xx_CPU6(0x2b80, r3)
329 mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
330 DO_8xx_CPU6(0x3b80, r3)
331 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
332 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
333 lwz r10, 0(r11) /* Get the pte */
334
	/* Set the accessed bit in the Linux pte in memory. */
335 ori r10, r10, _PAGE_ACCESSED
336 stw r10, 0(r11)
337
338 /* The Linux PTE won't go exactly into the MMU TLB.
339 * Software indicator bits 21, 22 and 28 must be clear.
340 * Software indicator bits 24, 25, 26, and 27 must be
341 * set. All other Linux PTE bits control the behavior
342 * of the MMU.
343 */
3442: li r11, 0x00f0
345 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
346 DO_8xx_CPU6(0x2d80, r3)
347 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
348
349 mfspr r10, SPRN_M_TW /* Restore registers */
350 lwz r11, 0(r0)
351 mtcr r11
352 lwz r11, 4(r0)
353#ifdef CONFIG_8xx_CPU6
354 lwz r3, 8(r0)
355#endif
356 rfi
357
358 . = 0x1200
/*
 * Software tablewalk for the data TLB, same structure as
 * InstructionTLBMiss: walk the page tables via M_TWB/MD_TWC, set
 * _PAGE_ACCESSED, massage the Linux pte into MD_RPN. An invalid
 * entry is loaded anyway so the access refaults into DataTLBError.
 */
DataStoreTLBMiss:
360#ifdef CONFIG_8xx_CPU6
361 stw r3, 8(r0)
362#endif
363 DO_8xx_CPU6(0x3f80, r3)
364 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
365 mfcr r10
366 stw r10, 0(r0)
367 stw r11, 4(r0)
368 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
369
370 /* If we are faulting a kernel address, we have to use the
371 * kernel page tables.
372 */
373 andi. r11, r10, 0x0800
374 beq 3f
375 lis r11, swapper_pg_dir@h
376 ori r11, r11, swapper_pg_dir@l
377 rlwimi r10, r11, 0, 2, 19
3783:
379 lwz r11, 0(r10) /* Get the level 1 entry */
380 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
381 beq 2f /* If zero, don't try to find a pte */
382
383 /* We have a pte table, so load fetch the pte from the table.
384 */
385 ori r11, r11, 1 /* Set valid bit in physical L2 page */
386 DO_8xx_CPU6(0x3b80, r3)
387 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
388 mfspr r10, SPRN_MD_TWC /* ....and get the pte address */
389 lwz r10, 0(r10) /* Get the pte */
390
391 /* Insert the Guarded flag into the TWC from the Linux PTE.
392 * It is bit 27 of both the Linux PTE and the TWC (at least
393 * I got that right :-). It will be better when we can put
394 * this into the Linux pgd/pmd and load it in the operation
395 * above.
396 */
397 rlwimi r11, r10, 0, 27, 27
398 DO_8xx_CPU6(0x3b80, r3)
399 mtspr SPRN_MD_TWC, r11
400
401 mfspr r11, SPRN_MD_TWC /* get the pte address again */
402 ori r10, r10, _PAGE_ACCESSED
403 stw r10, 0(r11)
404
405 /* The Linux PTE won't go exactly into the MMU TLB.
406 * Software indicator bits 21, 22 and 28 must be clear.
407 * Software indicator bits 24, 25, 26, and 27 must be
408 * set. All other Linux PTE bits control the behavior
409 * of the MMU.
410 */
4112: li r11, 0x00f0
412 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
413 DO_8xx_CPU6(0x3d80, r3)
414 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
415
416 mfspr r10, SPRN_M_TW /* Restore registers */
417 lwz r11, 0(r0)
418 mtcr r11
419 lwz r11, 4(r0)
420#ifdef CONFIG_8xx_CPU6
421 lwz r3, 8(r0)
422#endif
423 rfi
424
425/* This is an instruction TLB error on the MPC8xx. This could be due
426 * to many reasons, such as executing guarded memory or illegal instruction
427 * addresses. There is nothing to do but handle a big time error fault.
428 */
429 . = 0x1300
InstructionTLBError:
431 b InstructionAccess
432
433/* This is the data TLB error on the MPC8xx. This could be due to
434 * many reasons, including a dirty update to a pte. We can catch that
435 * one here, but anything else is an error. First, we track down the
436 * Linux pte. If it is valid, write access is allowed, but the
437 * page dirty bit is not set, we will set it and reload the TLB. For
438 * any other case, we bail out to a higher level function that can
439 * handle it.
440 */
441 . = 0x1400
DataTLBError:
443#ifdef CONFIG_8xx_CPU6
444 stw r3, 8(r0)
445#endif
446 DO_8xx_CPU6(0x3f80, r3)
447 mtspr SPRN_M_TW, r10 /* Save a couple of working registers */
448 mfcr r10
449 stw r10, 0(r0)
450 stw r11, 4(r0)
451
452 /* First, make sure this was a store operation.
453 */
454 mfspr r10, SPRN_DSISR
455 andis. r11, r10, 0x0200 /* If set, indicates store op */
456 beq 2f
457
458 /* The EA of a data TLB miss is automatically stored in the MD_EPN
459 * register. The EA of a data TLB error is automatically stored in
460 * the DAR, but not the MD_EPN register. We must copy the 20 most
461 * significant bits of the EA from the DAR to MD_EPN before we
462 * start walking the page tables. We also need to copy the CASID
463 * value from the M_CASID register.
464 * Addendum: The EA of a data TLB error is _supposed_ to be stored
465 * in DAR, but it seems that this doesn't happen in some cases, such
466 * as when the error is due to a dcbi instruction to a page with a
467 * TLB that doesn't have the changed bit set. In such cases, there
468 * does not appear to be any way to recover the EA of the error
469 * since it is neither in DAR nor MD_EPN. As a workaround, the
470 * _PAGE_HWWRITE bit is set for all kernel data pages when the PTEs
471 * are initialized in mapin_ram(). This will avoid the problem,
472 * assuming we only use the dcbi instruction on kernel addresses.
473 */
474 mfspr r10, SPRN_DAR
475 rlwinm r11, r10, 0, 0, 19
476 ori r11, r11, MD_EVALID
477 mfspr r10, SPRN_M_CASID
478 rlwimi r11, r10, 0, 28, 31
479 DO_8xx_CPU6(0x3780, r3)
480 mtspr SPRN_MD_EPN, r11
481
482 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
483
484 /* If we are faulting a kernel address, we have to use the
485 * kernel page tables.
486 */
487 andi. r11, r10, 0x0800
488 beq 3f
489 lis r11, swapper_pg_dir@h
490 ori r11, r11, swapper_pg_dir@l
491 rlwimi r10, r11, 0, 2, 19
4923:
493 lwz r11, 0(r10) /* Get the level 1 entry */
494 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
495 beq 2f /* If zero, bail */
496
497 /* We have a pte table, so fetch the pte from the table.
498 */
499 ori r11, r11, 1 /* Set valid bit in physical L2 page */
500 DO_8xx_CPU6(0x3b80, r3)
501 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */
502 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
503 lwz r10, 0(r11) /* Get the pte */
504
	/* Only a write to a writeable-but-not-yet-dirty page is handled
	 * here; anything else falls through to the full fault path. */
505 andi. r11, r10, _PAGE_RW /* Is it writeable? */
506 beq 2f /* Bail out if not */
507
508 /* Update 'changed', among others.
509 */
510 ori r10, r10, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
511 mfspr r11, SPRN_MD_TWC /* Get pte address again */
512 stw r10, 0(r11) /* and update pte in table */
513
514 /* The Linux PTE won't go exactly into the MMU TLB.
515 * Software indicator bits 21, 22 and 28 must be clear.
516 * Software indicator bits 24, 25, 26, and 27 must be
517 * set. All other Linux PTE bits control the behavior
518 * of the MMU.
519 */
520 li r11, 0x00f0
521 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
522 DO_8xx_CPU6(0x3d80, r3)
523 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
524
525 mfspr r10, SPRN_M_TW /* Restore registers */
526 lwz r11, 0(r0)
527 mtcr r11
528 lwz r11, 4(r0)
529#ifdef CONFIG_8xx_CPU6
530 lwz r3, 8(r0)
531#endif
532 rfi
	/* Not a simple dirty-bit update: restore state and punt to the
	 * generic data access fault handler. */
5332:
534 mfspr r10, SPRN_M_TW /* Restore registers */
535 lwz r11, 0(r0)
536 mtcr r11
537 lwz r11, 4(r0)
538#ifdef CONFIG_8xx_CPU6
539 lwz r3, 8(r0)
540#endif
541 b DataAccess
542
	/* Vectors 0x1500-0x1b00 are not generated by the MPC8xx core;
	 * route them all to the generic unknown_exception handler.
	 */
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559
	. = 0x2000		/* end of the fixed exception vector area */

/* Stub: the MPC8xx core has no hardware FPU, so there is no FP state
 * to give up; generic code may still call this, hence the empty body.
 */
	.globl	giveup_fpu
giveup_fpu:
	blr
565
/*
 * This is where the main kernel code starts.
 * Entered with the MMU still running on the initial_mmu mappings;
 * ends by rfi'ing into start_kernel with the real kernel MMU state.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4
	li	r3,0
	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 * r31..r27 carry the boot parameters (board info, initrd start/end,
 * cmdline start/end) -- NOTE(review): they are saved at kernel entry,
 * outside this window; verify against _start.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r6,r6)
#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata workaround: store/load a flag word before the
	 * mtspr to M_TWB, same pattern as DO_8xx_CPU6 in the TLB
	 * handlers.  NOTE(review): 0x3980 presumably selects M_TWB --
	 * confirm against the errata document.
	 */
	lis	r4, cpu6_errata_word@h
	ori	r4, r4, cpu6_errata_word@l
	li	r3, 0x3980
	stw	r3, 12(r4)
	lwz	r3, 12(r4)
#endif
	mtspr	SPRN_M_TWB, r6		/* page table base (physical) */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)	/* translation off */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi				/* continue at 2: with MMU off */
/* Load up the kernel context */
2:
	SYNC			/* Force all PTE updates to finish */
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	 */
	tovirt(r6,r6)
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
651
/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also, set the cache mode since that is defined
 * by TLB entries and perform any additional mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 8M IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
#ifdef CONFIG_PIN_TLB
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
#else
	li	r8, 0
#endif
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */

#ifdef CONFIG_PIN_TLB
	lis	r10, (MD_RSV4I | MD_RESETVAL)@h
	ori	r10, r10, 0x1c00
	mr	r8, r10
#else
	lis	r10, MD_RESETVAL@h
#endif
#ifndef CONFIG_8xx_COPYBACK
	oris	r10, r10, MD_WTDEF@h	/* default to write-through data cache */
#endif
	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */

	/* Now map the lower 8 Meg into the TLBs.  For this quick hack,
	 * we can load the instruction and data TLB registers with the
	 * same values.
	 */
	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MI_EPN, r8
	mtspr	SPRN_MD_EPN, r8
	li	r8, MI_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MI_TWC, r8
	mtspr	SPRN_MD_TWC, r8
	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
	mtspr	SPRN_MD_RPN, r8
	lis	r8, MI_Kp@h		/* Set the protection mode */
	mtspr	SPRN_MI_AP, r8
	mtspr	SPRN_MD_AP, r8

	/* Map another 8 MByte at the IMMR to get the processor
	 * internal registers (among other things).  The IMMR area is
	 * mapped 1:1, so the EPN value also serves as the RPN base.
	 */
#ifdef CONFIG_PIN_TLB
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10
#endif
	mfspr	r9, 638			/* Get current IMMR (SPR 638) */
	andis.	r9, r9, 0xff80		/* Get 8Mbyte boundary */

	mr	r8, r9			/* Create vaddr for TLB */
	ori	r8, r8, MD_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r8, MD_PS8MEG		/* Set 8M byte page */
	ori	r8, r8, MD_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r8
	mr	r8, r9			/* Create paddr for TLB */
	ori	r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
	mtspr	SPRN_MD_RPN, r8

#ifdef CONFIG_PIN_TLB
	/* Map two more 8M kernel data pages.  Unlike the IMMR mapping
	 * above, these are kernel-virtual (KERNELBASE-based) addresses,
	 * so the RPN must be the physical page number built in r11,
	 * not the virtual EPN held in r8.
	 */
	addi	r10, r10, 0x0100
	mtspr	SPRN_MD_CTR, r10

	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
	addis	r8, r8, 0x0080		/* Add 8M */
	ori	r8, r8, MI_EVALID	/* Mark it valid */
	mtspr	SPRN_MD_EPN, r8
	li	r9, MI_PS8MEG		/* Set 8M byte page */
	ori	r9, r9, MI_SVALID	/* Make it valid */
	mtspr	SPRN_MD_TWC, r9
	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* BUGFIX: was r8 (the virtual EPN),
					 * which mapped the pinned page to a
					 * bogus physical address and left
					 * the computed RPN in r11 unused. */

	addis	r8, r8, 0x0080		/* Add 8M */
	mtspr	SPRN_MD_EPN, r8
	mtspr	SPRN_MD_TWC, r9
	addis	r11, r11, 0x0080	/* Add 8M */
	mtspr	SPRN_MD_RPN, r11	/* BUGFIX: was r8, same as above */
#endif

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
#ifdef CONFIG_8xx_COPYBACK
	mtspr	SPRN_DC_CST, r8
#else
	/* For a debug option, I left this here to easily enable
	 * the write through cache mode
	 */
	lis	r8, DC_SFWT@h
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_DC_CST, r8
#endif
	blr
766
767
/*
 * Set up to use a given MMU context.
 * r3 is context number, r4 is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 * (The pointer at KERNELBASE+0xf0 was planted during start_here.)
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif

#ifdef CONFIG_8xx_CPU6
	/* CPU6 errata workaround: each mtspr to an MMU SPR is preceded
	 * by a store/load of a magic word into cpu6_errata_word; the
	 * value differs per target SPR (0x3980 before M_TWB, 0x3380
	 * before M_CASID).  NOTE(review): confirm values against the
	 * CPU6 errata table.
	 */
	lis	r6, cpu6_errata_word@h
	ori	r6, r6, cpu6_errata_word@l
	tophys	(r4, r4)
	li	r7, 0x3980
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_TWB, r4		/* Update MMU base address */
	li	r7, 0x3380
	stw	r7, 12(r6)
	lwz	r7, 12(r6)
	mtspr	SPRN_M_CASID, r3	/* Update context */
#else
	mtspr	SPRN_M_CASID,r3		/* Update context */
	tophys	(r4, r4)
	mtspr	SPRN_M_TWB, r4		/* and pgd */
#endif
	SYNC
	blr
806
#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
 * It is important we get called with interrupts disabled.  I used to
 * do that, but it appears that all code that calls this already had
 * interrupt disabled.
 *
 * Loads the decrementer (SPR 22) with r3, applying the CPU6 errata
 * store/load sequence first.  NOTE(review): 0x2c00 presumably selects
 * the DEC register -- confirm against the errata table.
 */
	.globl	set_dec_cpu6
set_dec_cpu6:
	lis	r7, cpu6_errata_word@h
	ori	r7, r7, cpu6_errata_word@l
	li	r4, 0x2c00
	stw	r4, 8(r7)
	lwz	r4, 8(r7)
	mtspr	22, r3		/* Update Decrementer (SPR 22 = DEC) */
	SYNC
	blr
#endif
824
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096			/* one page of zeros */

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	4096			/* kernel's initial page directory
					 * (loaded into M_TWB above) */

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	512

/* Room for two PTE table pointers, usually the kernel and current user
 * pointer to their respective root page table (pgdir).
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_8xx_CPU6
/* Scratch words used by the CPU6 errata store/load sequences. */
	.globl	cpu6_errata_word
cpu6_errata_word:
	.space	16
#endif
860
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
new file mode 100644
index 000000000000..5063c603fad4
--- /dev/null
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -0,0 +1,1063 @@
1/*
2 * arch/ppc/kernel/head_fsl_booke.S
3 *
4 * Kernel execution entry point code.
5 *
6 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
7 * Initial PowerPC version.
8 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
9 * Rewritten for PReP
10 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
11 * Low-level exception handers, MMU support, and rewrite.
12 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
13 * PowerPC 8xx modifications.
14 * Copyright (c) 1998-1999 TiVo, Inc.
15 * PowerPC 403GCX modifications.
16 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
17 * PowerPC 403GCX/405GP modifications.
18 * Copyright 2000 MontaVista Software Inc.
19 * PPC405 modifications
20 * PowerPC 403GCX/405GP modifications.
21 * Author: MontaVista Software, Inc.
22 * frank_rowand@mvista.com or source@mvista.com
23 * debbie_chu@mvista.com
24 * Copyright 2002-2004 MontaVista Software, Inc.
25 * PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
26 * Copyright 2004 Freescale Semiconductor, Inc
27 * PowerPC e500 modifications, Kumar Gala <kumar.gala@freescale.com>
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License as published by the
31 * Free Software Foundation; either version 2 of the License, or (at your
32 * option) any later version.
33 */
34
35#include <linux/config.h>
36#include <linux/threads.h>
37#include <asm/processor.h>
38#include <asm/page.h>
39#include <asm/mmu.h>
40#include <asm/pgtable.h>
41#include <asm/cputable.h>
42#include <asm/thread_info.h>
43#include <asm/ppc_asm.h>
44#include <asm/asm-offsets.h>
45#include "head_booke.h"
46
47/* As with the other PowerPC ports, it is expected that when code
48 * execution begins here, the following registers contain valid, yet
49 * optional, information:
50 *
51 * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
52 * r4 - Starting address of the init RAM disk
53 * r5 - Ending address of the init RAM disk
54 * r6 - Start of kernel command line string (e.g. "mem=128")
55 * r7 - End of kernel command line string
56 *
57 */
58 .text
59_GLOBAL(_stext)
60_GLOBAL(_start)
61 /*
62 * Reserve a word at a fixed location to store the address
63 * of abatron_pteptrs
64 */
65 nop
66/*
67 * Save parameters we are passed
68 */
69 mr r31,r3
70 mr r30,r4
71 mr r29,r5
72 mr r28,r6
73 mr r27,r7
74 li r24,0 /* CPU number */
75
76/* We try to not make any assumptions about how the boot loader
77 * setup or used the TLBs. We invalidate all mappings from the
78 * boot loader and load a single entry in TLB1[0] to map the
79 * first 16M of kernel memory. Any boot info passed from the
80 * bootloader needs to live in this first 16M.
81 *
82 * Requirement on bootloader:
83 * - The page we're executing in needs to reside in TLB1 and
84 * have IPROT=1. If not an invalidate broadcast could
85 * evict the entry we're currently executing in.
86 *
87 * r3 = Index of TLB1 were executing in
88 * r4 = Current MSR[IS]
89 * r5 = Index of TLB1 temp mapping
90 *
91 * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0]
92 * if needed
93 */
94
95/* 1. Find the index of the entry we're executing in */
96 bl invstr /* Find our address */
97invstr: mflr r6 /* Make it accessible */
98 mfmsr r7
99 rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
100 mfspr r7, SPRN_PID0
101 slwi r7,r7,16
102 or r7,r7,r4
103 mtspr SPRN_MAS6,r7
104 tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
105#ifndef CONFIG_E200
106 mfspr r7,SPRN_MAS1
107 andis. r7,r7,MAS1_VALID@h
108 bne match_TLB
109 mfspr r7,SPRN_PID1
110 slwi r7,r7,16
111 or r7,r7,r4
112 mtspr SPRN_MAS6,r7
113 tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
114 mfspr r7,SPRN_MAS1
115 andis. r7,r7,MAS1_VALID@h
116 bne match_TLB
117 mfspr r7, SPRN_PID2
118 slwi r7,r7,16
119 or r7,r7,r4
120 mtspr SPRN_MAS6,r7
121 tlbsx 0,r6 /* Fall through, we had to match */
122#endif
123match_TLB:
124 mfspr r7,SPRN_MAS0
125 rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
126
127 mfspr r7,SPRN_MAS1 /* Insure IPROT set */
128 oris r7,r7,MAS1_IPROT@h
129 mtspr SPRN_MAS1,r7
130 tlbwe
131
132/* 2. Invalidate all entries except the entry we're executing in */
133 mfspr r9,SPRN_TLB1CFG
134 andi. r9,r9,0xfff
135 li r6,0 /* Set Entry counter to 0 */
1361: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
137 rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
138 mtspr SPRN_MAS0,r7
139 tlbre
140 mfspr r7,SPRN_MAS1
141 rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
142 cmpw r3,r6
143 beq skpinv /* Dont update the current execution TLB */
144 mtspr SPRN_MAS1,r7
145 tlbwe
146 isync
147skpinv: addi r6,r6,1 /* Increment */
148 cmpw r6,r9 /* Are we done? */
149 bne 1b /* If not, repeat */
150
151 /* Invalidate TLB0 */
152 li r6,0x04
153 tlbivax 0,r6
154#ifdef CONFIG_SMP
155 tlbsync
156#endif
157 /* Invalidate TLB1 */
158 li r6,0x0c
159 tlbivax 0,r6
160#ifdef CONFIG_SMP
161 tlbsync
162#endif
163 msync
164
165/* 3. Setup a temp mapping and jump to it */
166 andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
167 addi r5, r5, 0x1
168 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
169 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
170 mtspr SPRN_MAS0,r7
171 tlbre
172
173 /* Just modify the entry ID and EPN for the temp mapping */
174 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
175 rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
176 mtspr SPRN_MAS0,r7
177 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
178 slwi r6,r6,12
179 oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
180 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
181 mtspr SPRN_MAS1,r6
182 mfspr r6,SPRN_MAS2
183 li r7,0 /* temp EPN = 0 */
184 rlwimi r7,r6,0,20,31
185 mtspr SPRN_MAS2,r7
186 tlbwe
187
188 xori r6,r4,1
189 slwi r6,r6,5 /* setup new context with other address space */
190 bl 1f /* Find our address */
1911: mflr r9
192 rlwimi r7,r9,0,20,31
193 addi r7,r7,24
194 mtspr SPRN_SRR0,r7
195 mtspr SPRN_SRR1,r6
196 rfi
197
198/* 4. Clear out PIDs & Search info */
199 li r6,0
200 mtspr SPRN_PID0,r6
201#ifndef CONFIG_E200
202 mtspr SPRN_PID1,r6
203 mtspr SPRN_PID2,r6
204#endif
205 mtspr SPRN_MAS6,r6
206
207/* 5. Invalidate mapping we started in */
208 lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
209 rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
210 mtspr SPRN_MAS0,r7
211 tlbre
212 li r6,0
213 mtspr SPRN_MAS1,r6
214 tlbwe
215 /* Invalidate TLB1 */
216 li r9,0x0c
217 tlbivax 0,r9
218#ifdef CONFIG_SMP
219 tlbsync
220#endif
221 msync
222
223/* 6. Setup KERNELBASE mapping in TLB1[0] */
224 lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
225 mtspr SPRN_MAS0,r6
226 lis r6,(MAS1_VALID|MAS1_IPROT)@h
227 ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l
228 mtspr SPRN_MAS1,r6
229 li r7,0
230 lis r6,KERNELBASE@h
231 ori r6,r6,KERNELBASE@l
232 rlwimi r6,r7,0,20,31
233 mtspr SPRN_MAS2,r6
234 li r7,(MAS3_SX|MAS3_SW|MAS3_SR)
235 mtspr SPRN_MAS3,r7
236 tlbwe
237
238/* 7. Jump to KERNELBASE mapping */
239 lis r7,MSR_KERNEL@h
240 ori r7,r7,MSR_KERNEL@l
241 bl 1f /* Find our address */
2421: mflr r9
243 rlwimi r6,r9,0,20,31
244 addi r6,r6,24
245 mtspr SPRN_SRR0,r6
246 mtspr SPRN_SRR1,r7
247 rfi /* start execution out of TLB1[0] entry */
248
/* 8. Clear out the temp mapping.
 * BUGFIX: the original wrote r8 -- which is never initialized on any
 * path from _start -- straight into MAS1.  If the boot-time garbage in
 * r8 happened to have VALID|IPROT set, tlbwe would create a stray
 * protected TLB1 entry that the flash invalidate below cannot remove
 * (IPROT entries survive tlbivax).  Instead, read the entry back after
 * tlbre and explicitly clear the Valid and IPROT bits, the same idiom
 * used in step 2 above.
 */
	lis	r7,0x1000	/* Set MAS0(TLBSEL) = 1 */
	rlwimi	r7,r5,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r5) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r8,SPRN_MAS1
	rlwinm	r8,r8,0,2,31	/* Clear MAS1 Valid and IPROT */
	mtspr	SPRN_MAS1,r8
	tlbwe
	/* Invalidate TLB1 */
	li	r9,0x0c
	tlbivax	0,r9
#ifdef CONFIG_SMP
	tlbsync
#endif
	msync
263
264 /* Establish the interrupt vector offsets */
265 SET_IVOR(0, CriticalInput);
266 SET_IVOR(1, MachineCheck);
267 SET_IVOR(2, DataStorage);
268 SET_IVOR(3, InstructionStorage);
269 SET_IVOR(4, ExternalInput);
270 SET_IVOR(5, Alignment);
271 SET_IVOR(6, Program);
272 SET_IVOR(7, FloatingPointUnavailable);
273 SET_IVOR(8, SystemCall);
274 SET_IVOR(9, AuxillaryProcessorUnavailable);
275 SET_IVOR(10, Decrementer);
276 SET_IVOR(11, FixedIntervalTimer);
277 SET_IVOR(12, WatchdogTimer);
278 SET_IVOR(13, DataTLBError);
279 SET_IVOR(14, InstructionTLBError);
280 SET_IVOR(15, Debug);
281 SET_IVOR(32, SPEUnavailable);
282 SET_IVOR(33, SPEFloatingPointData);
283 SET_IVOR(34, SPEFloatingPointRound);
284#ifndef CONFIG_E200
285 SET_IVOR(35, PerformanceMonitor);
286#endif
287
288 /* Establish the interrupt vector base */
289 lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */
290 mtspr SPRN_IVPR,r4
291
292 /* Setup the defaults for TLB entries */
293 li r2,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
294#ifdef CONFIG_E200
295 oris r2,r2,MAS4_TLBSELD(1)@h
296#endif
297 mtspr SPRN_MAS4, r2
298
299#if 0
300 /* Enable DOZE */
301 mfspr r2,SPRN_HID0
302 oris r2,r2,HID0_DOZE@h
303 mtspr SPRN_HID0, r2
304#endif
305#ifdef CONFIG_E200
306 /* enable dedicated debug exception handling resources (Debug APU) */
307 mfspr r2,SPRN_HID0
308 ori r2,r2,HID0_DAPUEN@l
309 mtspr SPRN_HID0,r2
310#endif
311
312#if !defined(CONFIG_BDI_SWITCH)
313 /*
314 * The Abatron BDI JTAG debugger does not tolerate others
315 * mucking with the debug registers.
316 */
317 lis r2,DBCR0_IDM@h
318 mtspr SPRN_DBCR0,r2
319 /* clear any residual debug events */
320 li r2,-1
321 mtspr SPRN_DBSR,r2
322#endif
323
324 /*
325 * This is where the main kernel code starts.
326 */
327
328 /* ptr to current */
329 lis r2,init_task@h
330 ori r2,r2,init_task@l
331
332 /* ptr to current thread */
333 addi r4,r2,THREAD /* init task's THREAD */
334 mtspr SPRN_SPRG3,r4
335
336 /* stack */
337 lis r1,init_thread_union@h
338 ori r1,r1,init_thread_union@l
339 li r0,0
340 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
341
342 bl early_init
343
344 mfspr r3,SPRN_TLB1CFG
345 andi. r3,r3,0xfff
346 lis r4,num_tlbcam_entries@ha
347 stw r3,num_tlbcam_entries@l(r4)
348/*
349 * Decide what sort of machine this is and initialize the MMU.
350 */
351 mr r3,r31
352 mr r4,r30
353 mr r5,r29
354 mr r6,r28
355 mr r7,r27
356 bl machine_init
357 bl MMU_init
358
359 /* Setup PTE pointers for the Abatron bdiGDB */
360 lis r6, swapper_pg_dir@h
361 ori r6, r6, swapper_pg_dir@l
362 lis r5, abatron_pteptrs@h
363 ori r5, r5, abatron_pteptrs@l
364 lis r4, KERNELBASE@h
365 ori r4, r4, KERNELBASE@l
366 stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */
367 stw r6, 0(r5)
368
369 /* Let's move on */
370 lis r4,start_kernel@h
371 ori r4,r4,start_kernel@l
372 lis r3,MSR_KERNEL@h
373 ori r3,r3,MSR_KERNEL@l
374 mtspr SPRN_SRR0,r4
375 mtspr SPRN_SRR1,r3
376 rfi /* change context and jump to start_kernel */
377
/* Macros to hide the PTE size differences
 *
 * FIND_PTE -- walks the page tables given EA & pgdir pointer
 *   r10 -- EA of fault
 *   r11 -- PGDIR pointer
 *   r12 -- free
 *   label 2: is the bailout case
 *
 * if we find the pte (fall through):
 *   r11 is low pte word
 *   r12 is pointer to the pte
 */
#ifdef CONFIG_PTE_64BIT
/* 8-byte PTEs: the Linux flags live in the low word at offset 4. */
#define PTE_FLAGS_OFFSET	4
#define FIND_PTE	\
	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
	rlwinm.	r12, r11, 0, 0, 20;	/* Extract pt base address */	\
	beq	2f;			/* Bail if no table */		\
	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
	lwz	r11, 4(r12);		/* Get pte entry */
#else
/* 4-byte PTEs: flags and RPN share the single word at offset 0. */
#define PTE_FLAGS_OFFSET	0
#define FIND_PTE	\
	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
	lwz	r11, 0(r11);		/* Get L1 entry */			\
	rlwinm.	r12, r11, 0, 0, 19;	/* Extract L2 (pte) base address */	\
	beq	2f;			/* Bail if no table */			\
	rlwimi	r12, r10, 22, 20, 29;	/* Compute PTE address */		\
	lwz	r11, 0(r12);		/* Get Linux PTE */
#endif
409
410/*
411 * Interrupt vector entry code
412 *
413 * The Book E MMUs are always on so we don't need to handle
414 * interrupts in real mode as with previous PPC processors. In
415 * this case we handle interrupts in the kernel virtual address
416 * space.
417 *
418 * Interrupt vectors are dynamically placed relative to the
419 * interrupt prefix as determined by the address of interrupt_base.
420 * The interrupt vectors offsets are programmed using the labels
421 * for each interrupt vector entry.
422 *
423 * Interrupt vectors must be aligned on a 16 byte boundary.
424 * We align on a 32 byte cache line boundary for good measure.
425 */
426
427interrupt_base:
428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430
431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif
438
439 /* Data Storage Interrupt */
440 START_EXCEPTION(DataStorage)
441 mtspr SPRN_SPRG0, r10 /* Save some working registers */
442 mtspr SPRN_SPRG1, r11
443 mtspr SPRN_SPRG4W, r12
444 mtspr SPRN_SPRG5W, r13
445 mfcr r11
446 mtspr SPRN_SPRG7W, r11
447
448 /*
449 * Check if it was a store fault, if not then bail
450 * because a user tried to access a kernel or
451 * read-protected page. Otherwise, get the
452 * offending address and handle it.
453 */
454 mfspr r10, SPRN_ESR
455 andis. r10, r10, ESR_ST@h
456 beq 2f
457
458 mfspr r10, SPRN_DEAR /* Get faulting address */
459
460 /* If we are faulting a kernel address, we have to use the
461 * kernel page tables.
462 */
463 lis r11, TASK_SIZE@h
464 ori r11, r11, TASK_SIZE@l
465 cmplw 0, r10, r11
466 bge 2f
467
468 /* Get the PGD for the current thread */
4693:
470 mfspr r11,SPRN_SPRG3
471 lwz r11,PGDIR(r11)
4724:
473 FIND_PTE
474
475 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
476 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
477 cmpwi 0, r13, _PAGE_RW|_PAGE_USER
478 bne 2f /* Bail if not */
479
480 /* Update 'changed'. */
481 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
482 stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
483
484 /* MAS2 not updated as the entry does exist in the tlb, this
485 fault taken to detect state transition (eg: COW -> DIRTY)
486 */
487 andi. r11, r11, _PAGE_HWEXEC
488 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
489 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
490
491 /* update search PID in MAS6, AS = 0 */
492 mfspr r12, SPRN_PID0
493 slwi r12, r12, 16
494 mtspr SPRN_MAS6, r12
495
496 /* find the TLB index that caused the fault. It has to be here. */
497 tlbsx 0, r10
498
499 /* only update the perm bits, assume the RPN is fine */
500 mfspr r12, SPRN_MAS3
501 rlwimi r12, r11, 0, 20, 31
502 mtspr SPRN_MAS3,r12
503 tlbwe
504
505 /* Done...restore registers and get out of here. */
506 mfspr r11, SPRN_SPRG7R
507 mtcr r11
508 mfspr r13, SPRN_SPRG5R
509 mfspr r12, SPRN_SPRG4R
510 mfspr r11, SPRN_SPRG1
511 mfspr r10, SPRN_SPRG0
512 rfi /* Force context change */
513
5142:
515 /*
516 * The bailout. Restore registers to pre-exception conditions
517 * and call the heavyweights to help us out.
518 */
519 mfspr r11, SPRN_SPRG7R
520 mtcr r11
521 mfspr r13, SPRN_SPRG5R
522 mfspr r12, SPRN_SPRG4R
523 mfspr r11, SPRN_SPRG1
524 mfspr r10, SPRN_SPRG0
525 b data_access
526
527 /* Instruction Storage Interrupt */
528 INSTRUCTION_STORAGE_EXCEPTION
529
530 /* External Input Interrupt */
531 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
532
533 /* Alignment Interrupt */
534 ALIGNMENT_EXCEPTION
535
536 /* Program Interrupt */
537 PROGRAM_EXCEPTION
538
539 /* Floating Point Unavailable Interrupt */
540#ifdef CONFIG_PPC_FPU
541 FP_UNAVAILABLE_EXCEPTION
542#else
543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif
549#endif
550
551 /* System Call Interrupt */
552 START_EXCEPTION(SystemCall)
553 NORMAL_EXCEPTION_PROLOG
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555
556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558
559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION
561
562 /* Fixed Internal Timer Interrupt */
563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565
566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif
572
573 /* Data TLB Error Interrupt */
574 START_EXCEPTION(DataTLBError)
575 mtspr SPRN_SPRG0, r10 /* Save some working registers */
576 mtspr SPRN_SPRG1, r11
577 mtspr SPRN_SPRG4W, r12
578 mtspr SPRN_SPRG5W, r13
579 mfcr r11
580 mtspr SPRN_SPRG7W, r11
581 mfspr r10, SPRN_DEAR /* Get faulting address */
582
583 /* If we are faulting a kernel address, we have to use the
584 * kernel page tables.
585 */
586 lis r11, TASK_SIZE@h
587 ori r11, r11, TASK_SIZE@l
588 cmplw 5, r10, r11
589 blt 5, 3f
590 lis r11, swapper_pg_dir@h
591 ori r11, r11, swapper_pg_dir@l
592
593 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
594 rlwinm r12,r12,0,16,1
595 mtspr SPRN_MAS1,r12
596
597 b 4f
598
599 /* Get the PGD for the current thread */
6003:
601 mfspr r11,SPRN_SPRG3
602 lwz r11,PGDIR(r11)
603
6044:
605 FIND_PTE
606 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
607 beq 2f /* Bail if not present */
608
609#ifdef CONFIG_PTE_64BIT
610 lwz r13, 0(r12)
611#endif
612 ori r11, r11, _PAGE_ACCESSED
613 stw r11, PTE_FLAGS_OFFSET(r12)
614
615 /* Jump to common tlb load */
616 b finish_tlb_load
6172:
618 /* The bailout. Restore registers to pre-exception conditions
619 * and call the heavyweights to help us out.
620 */
621 mfspr r11, SPRN_SPRG7R
622 mtcr r11
623 mfspr r13, SPRN_SPRG5R
624 mfspr r12, SPRN_SPRG4R
625 mfspr r11, SPRN_SPRG1
626 mfspr r10, SPRN_SPRG0
627 b data_access
628
629 /* Instruction TLB Error Interrupt */
630 /*
631 * Nearly the same as above, except we get our
632 * information from different registers and bailout
633 * to a different point.
634 */
635 START_EXCEPTION(InstructionTLBError)
636 mtspr SPRN_SPRG0, r10 /* Save some working registers */
637 mtspr SPRN_SPRG1, r11
638 mtspr SPRN_SPRG4W, r12
639 mtspr SPRN_SPRG5W, r13
640 mfcr r11
641 mtspr SPRN_SPRG7W, r11
642 mfspr r10, SPRN_SRR0 /* Get faulting address */
643
644 /* If we are faulting a kernel address, we have to use the
645 * kernel page tables.
646 */
647 lis r11, TASK_SIZE@h
648 ori r11, r11, TASK_SIZE@l
649 cmplw 5, r10, r11
650 blt 5, 3f
651 lis r11, swapper_pg_dir@h
652 ori r11, r11, swapper_pg_dir@l
653
654 mfspr r12,SPRN_MAS1 /* Set TID to 0 */
655 rlwinm r12,r12,0,16,1
656 mtspr SPRN_MAS1,r12
657
658 b 4f
659
660 /* Get the PGD for the current thread */
6613:
662 mfspr r11,SPRN_SPRG3
663 lwz r11,PGDIR(r11)
664
6654:
666 FIND_PTE
667 andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
668 beq 2f /* Bail if not present */
669
670#ifdef CONFIG_PTE_64BIT
671 lwz r13, 0(r12)
672#endif
673 ori r11, r11, _PAGE_ACCESSED
674 stw r11, PTE_FLAGS_OFFSET(r12)
675
676 /* Jump to common TLB load point */
677 b finish_tlb_load
678
6792:
680 /* The bailout. Restore registers to pre-exception conditions
681 * and call the heavyweights to help us out.
682 */
683 mfspr r11, SPRN_SPRG7R
684 mtcr r11
685 mfspr r13, SPRN_SPRG5R
686 mfspr r12, SPRN_SPRG4R
687 mfspr r11, SPRN_SPRG1
688 mfspr r10, SPRN_SPRG0
689 b InstructionStorage
690
691#ifdef CONFIG_SPE
692 /* SPE Unavailable */
693 START_EXCEPTION(SPEUnavailable)
694 NORMAL_EXCEPTION_PROLOG
695 bne load_up_spe
696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else
699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */
701
702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */
708
709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711
712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714
715
716 /* Debug Interrupt */
717 DEBUG_EXCEPTION
718
/*
 * Local functions
 */

	/*
	 * Data TLB exceptions will bail out to this point
	 * if they can't resolve the lightweight TLB fault.
	 * Dispatches to the cache-locking handler when the ESR says
	 * the fault came from a locked line, otherwise to the normal
	 * page fault path.
	 */
data_access:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	andis.	r10,r5,(ESR_ILK|ESR_DLK)@h	/* cache-locking fault? */
	bne	1f
	EXC_XFER_EE_LITE(0x0300, handle_page_fault)
1:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x0300, CacheLockingException)
738
/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - TLB (info from Linux PTE)
 *	r12, r13 - available to use
 *	CR5 - results of addr < TASK_SIZE
 *	MAS0, MAS1 - loaded with proper value when we get here
 *	MAS2, MAS3 - will need additional info from Linux PTE
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load:
	/*
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */

	mfspr	r12, SPRN_MAS2
#ifdef CONFIG_PTE_64BIT
	rlwimi	r12, r11, 26, 24, 31	/* extract ...WIMGE from pte */
#else
	rlwimi	r12, r11, 26, 27, 31	/* extract WIMGE from pte */
#endif
	mtspr	SPRN_MAS2, r12

	bge	5, 1f			/* CR5 set by the miss handler:
					 * addr >= TASK_SIZE => kernel */

	/* is user addr */
	andi.	r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
	andi.	r10, r11, _PAGE_USER	/* Test for _PAGE_USER; CR0 result
					 * feeds the iseleq below */
	srwi	r10, r12, 1
	or	r12, r12, r10		/* Copy user perms into supervisor */
	iseleq	r12, 0, r12		/* no permissions unless _PAGE_USER */
	b	2f

	/* is kernel addr */
1:	rlwinm	r12, r11, 31, 29, 29	/* Extract _PAGE_HWWRITE into SW */
	ori	r12, r12, (MAS3_SX | MAS3_SR)

#ifdef CONFIG_PTE_64BIT
2:	rlwimi	r12, r13, 24, 0, 7	/* grab RPN[32:39] */
	rlwimi	r12, r11, 24, 8, 19	/* grab RPN[40:51] */
	mtspr	SPRN_MAS3, r12
BEGIN_FTR_SECTION
	srwi	r10, r13, 8		/* grab RPN[8:31] */
	mtspr	SPRN_MAS7, r10
END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
#else
2:	rlwimi	r11, r12, 0, 20, 31	/* Extract RPN from PTE and merge with perms */
	mtspr	SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
	/* Round robin TLB1 entries assignment */
	mfspr	r12, SPRN_MAS0

	/* Extract TLB1CFG(NENTRY) */
	mfspr	r11, SPRN_TLB1CFG
	andi.	r11, r11, 0xfff

	/* Extract MAS0(NV) */
	andi.	r13, r12, 0xfff
	addi	r13, r13, 1
	cmpw	0, r13, r11
	addi	r12, r12, 1

	/* check if we need to wrap */
	blt	7f

	/* wrap back to first free tlbcam entry */
	lis	r13, tlbcam_index@ha
	lwz	r13, tlbcam_index@l(r13)
	rlwimi	r12, r13, 0, 20, 31
7:
	mtspr	SPRN_MAS0,r12
#endif /* CONFIG_E200 */

	tlbwe

	/* Done...restore registers and get out of here. */
	mfspr	r11, SPRN_SPRG7R	/* saved CR */
	mtcr	r11
	mfspr	r13, SPRN_SPRG5R
	mfspr	r12, SPRN_SPRG4R
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	rfi				/* Force context change */
828
829#ifdef CONFIG_SPE
830/* Note that the SPE support is closely modeled after the AltiVec
831 * support. Changes to one are likely to be applicable to the
832 * other! */
833load_up_spe:
834/*
835 * Disable SPE for the task which had SPE previously,
836 * and save its SPE registers in its thread_struct.
837 * Enables SPE for use in the kernel on return.
838 * On SMP we know the SPE units are free, since we give it up every
839 * switch. -- Kumar
840 */
841 mfmsr r5
842 oris r5,r5,MSR_SPE@h
843 mtmsr r5 /* enable use of SPE now */
844 isync
845/*
846 * For SMP, we don't do lazy SPE switching because it just gets too
847 * horrendously complex, especially when a task switches from one CPU
848 * to another. Instead we call giveup_spe in switch_to.
849 */
850#ifndef CONFIG_SMP
851 lis r3,last_task_used_spe@ha
852 lwz r4,last_task_used_spe@l(r3)
853 cmpi 0,r4,0
854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC
860 evstddx evr10, r4, r5 /* save off accumulator */
861 lwz r5,PT_REGS(r4)
862 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
863 lis r10,MSR_SPE@h
864 andc r4,r4,r10 /* disable SPE for previous task */
865 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8661:
867#endif /* CONFIG_SMP */
868 /* enable use of SPE after return */
	/* r9 holds the saved SRR1/MSR image restored below */
869 oris r9,r9,MSR_SPE@h
870 mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
871 li r4,1
872 li r10,THREAD_ACC
873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5
875 evmra evr4,evr4
876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3)
880#endif /* CONFIG_SMP */
881 /* restore registers and return */
8822: REST_4GPRS(3, r11)
883 lwz r10,_CCR(r11)
884 REST_GPR(1, r11)
885 mtcr r10
886 lwz r10,_LINK(r11)
887 mtlr r10
888 REST_GPR(10, r11)
889 mtspr SPRN_SRR1,r9
890 mtspr SPRN_SRR0,r12
891 REST_GPR(9, r11)
892 REST_GPR(12, r11)
893 lwz r11,GPR11(r11)
894 SYNC
895 rfi
896
897/*
898 * SPE unavailable trap from kernel - print a message, but let
899 * the task use SPE in the kernel until it returns to user mode.
900 */
901KernelSPE:
902 lwz r3,_MSR(r1)
903 oris r3,r3,MSR_SPE@h
904 stw r3,_MSR(r1) /* enable use of SPE after return */
	/* printk(fmt, current, regs->nip): r3=format, r4=task, r5=pc */
905 lis r3,87f@h
906 ori r3,r3,87f@l
907 mr r4,r2 /* current */
908 lwz r5,_NIP(r1)
909 bl printk
910 b ret_from_except
91187: .string "SPE used in kernel (task=%p, pc=%x) \n"
912 .align 4,0
913
914#endif /* CONFIG_SPE */
915
916/*
917 * Global functions
918 */
919
920/*
921 * extern void loadcam_entry(unsigned int index)
922 *
923 * Load TLBCAM[index] entry in to the L2 CAM MMU
924 */
925_GLOBAL(loadcam_entry)
926 lis r4,TLBCAM@ha
927 addi r4,r4,TLBCAM@l
	/* Each TLBCAM[] entry is 20 bytes (five 32-bit words); index
	 * arrives in r3, so r3 = &TLBCAM[index] after this. */
928 mulli r5,r3,20
929 add r3,r5,r4
	/* Copy the four MAS words from the entry into the MAS SPRs,
	 * then write the TLB entry. */
930 lwz r4,0(r3)
931 mtspr SPRN_MAS0,r4
932 lwz r4,4(r3)
933 mtspr SPRN_MAS1,r4
934 lwz r4,8(r3)
935 mtspr SPRN_MAS2,r4
936 lwz r4,12(r3)
937 mtspr SPRN_MAS3,r4
938 tlbwe
939 isync
940 blr
941
942/*
943 * extern void giveup_altivec(struct task_struct *prev)
944 *
945 * The e500 core does not have an AltiVec unit.
946 */
	/* Stub: provided only so common code can call it unconditionally. */
947_GLOBAL(giveup_altivec)
948 blr
949
950#ifdef CONFIG_SPE
951/*
952 * extern void giveup_spe(struct task_struct *prev)
953 *
 * Save prev's SPE state (32 EVRs, accumulator, SPEFSCR) into its
 * thread_struct and clear MSR_SPE in its saved user MSR, so the unit
 * is released for another task.  r3 == NULL means nothing to save.
954 */
955_GLOBAL(giveup_spe)
956 mfmsr r5
957 oris r5,r5,MSR_SPE@h
958 SYNC
959 mtmsr r5 /* enable use of SPE now */
960 isync
961 cmpi 0,r3,0
962 beqlr- /* if no previous owner, done */
963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3)
	/* CR0 from this compare is consumed by the "beq 1f" below;
	 * none of the intervening instructions touch CR0. */
965 cmpi 0,r5,0
966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC
970 evstddx evr6, r4, r3 /* save off accumulator */
971 mfspr r6,SPRN_SPEFSCR
972 stw r6,THREAD_SPEFSCR(r3) /* save spefscr register value */
973 beq 1f
974 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
975 lis r3,MSR_SPE@h
976 andc r4,r4,r3 /* disable SPE for previous task */
977 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
9781:
979#ifndef CONFIG_SMP
980 li r5,0
981 lis r4,last_task_used_spe@ha
982 stw r5,last_task_used_spe@l(r4)
983#endif /* CONFIG_SMP */
984 blr
985#endif /* CONFIG_SPE */
986
987/*
988 * extern void giveup_fpu(struct task_struct *prev)
989 *
990 * Not all FSL Book-E cores have an FPU
991 */
	/* Stub for FPU-less configurations; real version lives elsewhere
	 * when CONFIG_PPC_FPU is set. */
992#ifndef CONFIG_PPC_FPU
993_GLOBAL(giveup_fpu)
994 blr
995#endif
996
997/*
998 * extern void abort(void)
999 *
1000 * At present, this routine just applies a system reset.
1001 */
1002_GLOBAL(abort)
1003 li r13,0
1004 mtspr SPRN_DBCR0,r13 /* disable all debug events */
1005 mfmsr r13
1006 ori r13,r13,MSR_DE@l /* Enable Debug Events */
1007 mtmsr r13
	/* NOTE(review): the DBCR0 value read here is immediately
	 * overwritten by the lis below, so this mfspr looks dead —
	 * confirm whether a read-modify-write was intended. */
1008 mfspr r13,SPRN_DBCR0
1009 lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
	/* Writing DBCR0_RST_CHIP requests a chip reset; execution is
	 * not expected to continue past this point. */
1010 mtspr SPRN_DBCR0,r13
1011
	/* set_context(pid, pgdir): switch the MMU context by loading the
	 * new PID; r3 = context/PID value, r4 = new PGDIR pointer. */
1012_GLOBAL(set_context)

1014#ifdef CONFIG_BDI_SWITCH
1015 /* Context switch the PTE pointer for the Abatron BDI2000.
1016 * The PGDIR is the second parameter.
1017 */
1018 lis r5, abatron_pteptrs@h
1019 ori r5, r5, abatron_pteptrs@l
1020 stw r4, 0x4(r5)
1021#endif
1022 mtspr SPRN_PID,r3
1023 isync /* Force context change */
1024 blr
1025
1026/*
1027 * We put a few things here that have to be page-aligned. This stuff
1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */
1030 .data
1031 .align 12
1032 .globl sdata
1033sdata:
	/* One zeroed page shared kernel-wide (ZERO_PAGE). */
1034 .globl empty_zero_page
1035empty_zero_page:
1036 .space 4096
	/* Kernel's initial page directory. */
1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1039 .space 4096
1040
1041/* Reserved 4k for the critical exception stack & 4k for the machine
1042 * check stack per CPU for kernel mode exceptions */
1043 .section .bss
1044 .align 12
1045exception_stack_bottom:
1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1047 .globl exception_stack_top
1048exception_stack_top:
1049
1050/*
1051 * This space gets a copy of optional info passed to us by the bootstrap
1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1053 */
1054 .globl cmd_line
1055cmd_line:
1056 .space 512
1057
1058/*
1059 * Room for two PTE pointers, usually the kernel and current user pointers
1060 * to their respective root page table.
1061 */
1062abatron_pteptrs:
1063 .space 8
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
new file mode 100644
index 000000000000..444fdcc769f1
--- /dev/null
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -0,0 +1,233 @@
1/*
2 * This file contains the power_save function for 6xx & 7xxx CPUs
3 * rewritten in assembler
4 *
5 * Warning ! This code assumes that if your machine has a 750fx
6 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
7 * if this is not the case some additional changes will have to
8 * be done to check a runtime var (a bit like powersave-nap)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/config.h>
17#include <linux/threads.h>
18#include <asm/reg.h>
19#include <asm/page.h>
20#include <asm/cputable.h>
21#include <asm/thread_info.h>
22#include <asm/ppc_asm.h>
23#include <asm/asm-offsets.h>
24
25#undef DEBUG
26
27 .text
28
29/*
30 * Init idle, called at early CPU setup time from head.S for each CPU
31 * Make sure no rest of NAP mode remains in HID0, save default
32 * values for some CPU specific registers. Called with r24
33 * containing CPU number and r3 reloc offset
34 */
35_GLOBAL(init_idle_6xx)
36BEGIN_FTR_SECTION
37 mfspr r4,SPRN_HID0
38 rlwinm r4,r4,0,10,8 /* Clear NAP */
39 mtspr SPRN_HID0, r4
40 b 1f
41END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
42 blr
431:
	/* r5 = per-CPU word offset (cpu# * 4) + relocation offset,
	 * used to index the nap_save_* arrays below. */
44 slwi r5,r24,2
45 add r5,r5,r3
46BEGIN_FTR_SECTION
47 mfspr r4,SPRN_MSSCR0
48 addis r6,r5, nap_save_msscr0@ha
49 stw r4,nap_save_msscr0@l(r6)
50END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
51BEGIN_FTR_SECTION
52 mfspr r4,SPRN_HID1
53 addis r6,r5,nap_save_hid1@ha
54 stw r4,nap_save_hid1@l(r6)
55END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
56 blr
57
58/*
59 * Here is the power_save_6xx function. This could eventually be
60 * split into several functions & changing the function pointer
61 * depending on the various features.
62 */
63_GLOBAL(ppc6xx_idle)
64 /* Check if we can nap or doze, put HID0 mask in r3
65 */
66 lis r3, 0
67BEGIN_FTR_SECTION
68 lis r3,HID0_DOZE@h
69END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
70BEGIN_FTR_SECTION
71 /* We must dynamically check for the NAP feature as it
72 * can be cleared by CPU init after the fixups are done
73 */
74 lis r4,cur_cpu_spec@ha
75 lwz r4,cur_cpu_spec@l(r4)
76 lwz r4,CPU_SPEC_FEATURES(r4)
77 andi. r0,r4,CPU_FTR_CAN_NAP
78 beq 1f
79 /* Now check if user or arch enabled NAP mode */
80 lis r4,powersave_nap@ha
81 lwz r4,powersave_nap@l(r4)
82 cmpwi 0,r4,0
83 beq 1f
84 lis r3,HID0_NAP@h
851:
86END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	/* Neither NAP nor DOZE available/enabled: nothing to do. */
87 cmpwi 0,r3,0
88 beqlr
89
90 /* Clear MSR:EE */
91 mfmsr r7
92 rlwinm r0,r7,0,17,15
93 mtmsr r0
94
95 /* Check current_thread_info()->flags */
	/* thread_info sits at the base of the 8K stack: mask low 13 bits of r1 */
96 rlwinm r4,r1,0,0,18
97 lwz r4,TI_FLAGS(r4)
98 andi. r0,r4,_TIF_NEED_RESCHED
99 beq 1f
	/* Work pending: re-enable interrupts and bail out without sleeping. */
100 mtmsr r7 /* out of line this ? */
101 blr
1021:
103 /* Some pre-nap cleanups needed on some CPUs */
104 andis. r0,r3,HID0_NAP@h
105 beq 2f
106BEGIN_FTR_SECTION
107 /* Disable L2 prefetch on some 745x and try to ensure
108 * L2 prefetch engines are idle. As explained by errata
109 * text, we can't be sure they are, we just hope very hard
110 * that well be enough (sic !). At least I noticed Apple
111 * doesn't even bother doing the dcbf's here...
112 */
113 mfspr r4,SPRN_MSSCR0
114 rlwinm r4,r4,0,0,29
115 sync
116 mtspr SPRN_MSSCR0,r4
117 sync
118 isync
119 lis r4,KERNELBASE@h
120 dcbf 0,r4
121 dcbf 0,r4
122 dcbf 0,r4
123 dcbf 0,r4
124END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
125#ifdef DEBUG
126 lis r6,nap_enter_count@ha
127 lwz r4,nap_enter_count@l(r6)
128 addi r4,r4,1
129 stw r4,nap_enter_count@l(r6)
130#endif
1312:
132BEGIN_FTR_SECTION
133 /* Go to low speed mode on some 750FX */
134 lis r4,powersave_lowspeed@ha
135 lwz r4,powersave_lowspeed@l(r4)
136 cmpwi 0,r4,0
137 beq 1f
138 mfspr r4,SPRN_HID1
139 oris r4,r4,0x0001
140 mtspr SPRN_HID1,r4
1411:
142END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
143
144 /* Go to NAP or DOZE now */
145 mfspr r4,SPRN_HID0
146 lis r5,(HID0_NAP|HID0_SLEEP)@h
147BEGIN_FTR_SECTION
148 oris r5,r5,HID0_DOZE@h
149END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
150 andc r4,r4,r5
151 or r4,r4,r3
152BEGIN_FTR_SECTION
153 oris r4,r4,HID0_DPM@h /* that should be done once for all */
154END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
155 mtspr SPRN_HID0,r4
156BEGIN_FTR_SECTION
157 DSSALL
158 sync
159END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	/* Setting MSR_POW with EE set actually enters the sleep state. */
160 ori r7,r7,MSR_EE /* Could be omitted (already set) */
161 oris r7,r7,MSR_POW@h
162 sync
163 isync
164 mtmsr r7
165 isync
166 sync
167 blr
168
169/*
170 * Return from NAP/DOZE mode, restore some CPU specific registers,
171 * we are called with DR/IR still off and r2 containing physical
172 * address of current.
173 */
174_GLOBAL(power_save_6xx_restore)
175 mfspr r11,SPRN_HID0
176 rlwinm. r11,r11,0,10,8 /* Clear NAP & copy NAP bit !state to cr1 EQ */
177 cror 4*cr1+eq,4*cr0+eq,4*cr0+eq
178BEGIN_FTR_SECTION
179 rlwinm r11,r11,0,9,7 /* Clear DOZE */
180END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
181 mtspr SPRN_HID0, r11
182
183#ifdef DEBUG
184 beq cr1,1f
185 lis r11,(nap_return_count-KERNELBASE)@ha
186 lwz r9,nap_return_count@l(r11)
187 addi r9,r9,1
188 stw r9,nap_return_count@l(r11)
1891:
190#endif
191
	/* Find this CPU's thread_info (stack base) and convert to a
	 * physical address — MMU translation is still off here. */
192 rlwinm r9,r1,0,0,18
193 tophys(r9,r9)
194 lwz r11,TI_CPU(r9)
195 slwi r11,r11,2
196 /* Todo make sure all these are in the same page
197 * and load r22 (@ha part + CPU offset) only once
198 */
199BEGIN_FTR_SECTION
200 beq cr1,1f
201 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
202 lwz r9,nap_save_msscr0@l(r9)
203 mtspr SPRN_MSSCR0, r9
204 sync
205 isync
2061:
207END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
208BEGIN_FTR_SECTION
209 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
210 lwz r9,nap_save_hid1@l(r9)
211 mtspr SPRN_HID1, r9
212END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
213 b transfer_to_handler_cont
214
215 .data
216
	/* Per-CPU saved MSSCR0 images (written by init_idle_6xx). */
217_GLOBAL(nap_save_msscr0)
218 .space 4*NR_CPUS
219
	/* Per-CPU saved HID1 images (written by init_idle_6xx). */
220_GLOBAL(nap_save_hid1)
221 .space 4*NR_CPUS
222
	/* User/arch toggle: non-zero allows NAP mode. */
223_GLOBAL(powersave_nap)
224 .long 0
	/* User toggle: non-zero selects 750FX low-speed PLL while idle. */
225_GLOBAL(powersave_lowspeed)
226 .long 0
227
228#ifdef DEBUG
229_GLOBAL(nap_enter_count)
230 .space 4
231_GLOBAL(nap_return_count)
232 .space 4
233#endif
diff --git a/arch/ppc64/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index ca02afe2a795..1494e2f177f7 100644
--- a/arch/ppc64/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -39,13 +39,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
39 * can be cleared by CPU init after the fixups are done 39 * can be cleared by CPU init after the fixups are done
40 */ 40 */
41 LOADBASE(r3,cur_cpu_spec) 41 LOADBASE(r3,cur_cpu_spec)
42 ld r4,cur_cpu_spec@l(r3) 42 ld r4,OFF(cur_cpu_spec)(r3)
43 ld r4,CPU_SPEC_FEATURES(r4) 43 ld r4,CPU_SPEC_FEATURES(r4)
44 andi. r0,r4,CPU_FTR_CAN_NAP 44 andi. r0,r4,CPU_FTR_CAN_NAP
45 beqlr 45 beqlr
46 /* Now check if user or arch enabled NAP mode */ 46 /* Now check if user or arch enabled NAP mode */
47 LOADBASE(r3,powersave_nap) 47 LOADBASE(r3,powersave_nap)
48 lwz r4,powersave_nap@l(r3) 48 lwz r4,OFF(powersave_nap)(r3)
49 cmpwi 0,r4,0 49 cmpwi 0,r4,0
50 beqlr 50 beqlr
51 51
@@ -63,8 +63,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
63 beq 1f 63 beq 1f
64 mtmsrd r7 /* out of line this ? */ 64 mtmsrd r7 /* out of line this ? */
65 blr 65 blr
661: 661:
67 /* Go to NAP now */ 67 /* Go to NAP now */
68BEGIN_FTR_SECTION 68BEGIN_FTR_SECTION
69 DSSALL 69 DSSALL
70 sync 70 sync
@@ -76,4 +76,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
76 isync 76 isync
77 sync 77 sync
78 blr 78 blr
79
diff --git a/arch/ppc64/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 941043ae040f..941043ae040f 100644
--- a/arch/ppc64/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
diff --git a/arch/ppc64/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index b81de286df5e..b81de286df5e 100644
--- a/arch/ppc64/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
new file mode 100644
index 000000000000..3bedb532aed9
--- /dev/null
+++ b/arch/powerpc/kernel/misc_32.S
@@ -0,0 +1,1037 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 */
14
15#include <linux/config.h>
16#include <linux/sys.h>
17#include <asm/unistd.h>
18#include <asm/errno.h>
19#include <asm/reg.h>
20#include <asm/page.h>
21#include <asm/cache.h>
22#include <asm/cputable.h>
23#include <asm/mmu.h>
24#include <asm/ppc_asm.h>
25#include <asm/thread_info.h>
26#include <asm/asm-offsets.h>
27
28 .text
29
30 .align 5
	/* __delay(loops): busy-wait for r3 loop iterations; returns
	 * immediately when r3 == 0. */
31_GLOBAL(__delay)
32 cmpwi 0,r3,0
33 mtctr r3
34 beqlr
351: bdnz 1b
36 blr
37
38/*
39 * This returns the high 64 bits of the product of two 64-bit numbers.
 *
 * 32-bit ABI: A = (r3:r4), B = (r5:r6), result high 64 bits in (r3:r4).
 * Built from four 32x32->64 partial products; the low-word branches
 * skip work when the high halves are zero.
40 */
41_GLOBAL(mulhdu)
42 cmpwi r6,0
43 cmpwi cr1,r3,0
44 mr r10,r4
45 mulhwu r4,r4,r5
46 beq 1f
47 mulhwu r0,r10,r6
48 mullw r7,r10,r5
49 addc r7,r0,r7
50 addze r4,r4
511: beqlr cr1 /* all done if high part of A is 0 */
52 mr r10,r3
53 mullw r9,r3,r5
54 mulhwu r3,r3,r5
55 beq 2f
56 mullw r0,r10,r6
57 mulhwu r8,r10,r6
58 addc r7,r0,r7
59 adde r4,r4,r8
60 addze r3,r3
612: addc r4,r4,r9
62 addze r3,r3
63 blr
64
65/*
66 * Returns (address we're running at) - (address we were linked at)
67 * for use before the text and data are mapped to KERNELBASE.
68 */
69_GLOBAL(reloc_offset)
70 mflr r0
	/* bl/mflr pair captures the current physical PC in r3. */
71 bl 1f
721: mflr r3
73 LOADADDR(r4,1b)
74 subf r3,r4,r3
75 mtlr r0
76 blr
77
78/*
79 * add_reloc_offset(x) returns x + reloc_offset().
80 */
81_GLOBAL(add_reloc_offset)
82 mflr r0
	/* Same bl/mflr trick as reloc_offset, keeping r3 (x) intact. */
83 bl 1f
841: mflr r5
85 LOADADDR(r4,1b)
86 subf r5,r4,r5
87 add r3,r3,r5
88 mtlr r0
89 blr
90
91/*
92 * sub_reloc_offset(x) returns x - reloc_offset().
93 */
94_GLOBAL(sub_reloc_offset)
95 mflr r0
96 bl 1f
971: mflr r5
98 lis r4,1b@ha
99 addi r4,r4,1b@l
100 subf r5,r4,r5
101 subf r3,r5,r3
102 mtlr r0
103 blr
104
105/*
106 * reloc_got2 runs through the .got2 section adding an offset
107 * to each entry.
 *
 * r3 = offset to add.  Used at early boot and by prom_init-style
 * trampoline code when running at a different address than linked.
108 */
109_GLOBAL(reloc_got2)
110 mflr r11
111 lis r7,__got2_start@ha
112 addi r7,r7,__got2_start@l
113 lis r8,__got2_end@ha
114 addi r8,r8,__got2_end@l
	/* r8 = number of 4-byte GOT entries; nothing to do if zero. */
115 subf r8,r7,r8
116 srwi. r8,r8,2
117 beqlr
118 mtctr r8
	/* r0 = runtime relocation of this code; relocate r7 so the
	 * loop walks .got2 at its current physical location. */
119 bl 1f
1201: mflr r0
121 lis r4,1b@ha
122 addi r4,r4,1b@l
123 subf r0,r4,r0
124 add r7,r0,r7
1252: lwz r0,0(r7)
126 add r0,r0,r3
127 stw r0,0(r7)
128 addi r7,r7,4
129 bdnz 2b
130 mtlr r11
131 blr
132
133/*
134 * identify_cpu,
135 * called with r3 = data offset and r4 = CPU number
136 * doesn't change r3
 *
 * Scans cpu_specs[] for the entry whose (pvr & mask) == value and
 * stores its link-time address into cur_cpu_spec.
 * NOTE(review): no terminator check — assumes the table always
 * contains a catch-all entry that matches.
137 */
138_GLOBAL(identify_cpu)
139 addis r8,r3,cpu_specs@ha
140 addi r8,r8,cpu_specs@l
141 mfpvr r7
1421:
143 lwz r5,CPU_SPEC_PVR_MASK(r8)
144 and r5,r5,r7
145 lwz r6,CPU_SPEC_PVR_VALUE(r8)
146 cmplw 0,r6,r5
147 beq 1f
148 addi r8,r8,CPU_SPEC_ENTRY_SIZE
149 b 1b
1501:
151 addis r6,r3,cur_cpu_spec@ha
152 addi r6,r6,cur_cpu_spec@l
	/* Store the un-relocated (link-time) pointer. */
153 sub r8,r8,r3
154 stw r8,0(r6)
155 blr
156
157/*
158 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
159 * and writes nop's over sections of code that don't apply for this cpu.
160 * r3 = data offset (not changed)
 *
 * Each 16-byte fixup record is { mask, value, start, end }; a section
 * is nopped out when (features & mask) != value.
161 */
162_GLOBAL(do_cpu_ftr_fixups)
163 /* Get CPU 0 features */
164 addis r6,r3,cur_cpu_spec@ha
165 addi r6,r6,cur_cpu_spec@l
166 lwz r4,0(r6)
167 add r4,r4,r3
168 lwz r4,CPU_SPEC_FEATURES(r4)
169
170 /* Get the fixup table */
171 addis r6,r3,__start___ftr_fixup@ha
172 addi r6,r6,__start___ftr_fixup@l
173 addis r7,r3,__stop___ftr_fixup@ha
174 addi r7,r7,__stop___ftr_fixup@l
175
176 /* Do the fixup */
1771: cmplw 0,r6,r7
178 bgelr
179 addi r6,r6,16
180 lwz r8,-16(r6) /* mask */
181 and r8,r8,r4
182 lwz r9,-12(r6) /* value */
183 cmplw 0,r8,r9
184 beq 1b
185 lwz r8,-8(r6) /* section begin */
186 lwz r9,-4(r6) /* section end */
187 subf. r9,r8,r9
188 beq 1b
189 /* write nops over the section of code */
190 /* todo: if large section, add a branch at the start of it */
191 srwi r9,r9,2
192 mtctr r9
193 add r8,r8,r3
194 lis r0,0x60000000@h /* nop */
1953: stw r0,0(r8)
	/* On split-I/D-cache CPUs, push each patched word to memory and
	 * invalidate the icache line so the new nops are fetched. */
196 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
197 beq 2f
198 dcbst 0,r8 /* suboptimal, but simpler */
199 sync
200 icbi 0,r8
2012: addi r8,r8,4
202 bdnz 3b
203 sync /* additional sync needed on g4 */
204 isync
205 b 1b
206
207/*
208 * call_setup_cpu - call the setup_cpu function for this cpu
209 * r3 = data offset, r24 = cpu number
210 *
211 * Setup function is called with:
212 * r3 = data offset
213 * r4 = ptr to CPU spec (relocated)
214 */
215_GLOBAL(call_setup_cpu)
216 addis r4,r3,cur_cpu_spec@ha
217 addi r4,r4,cur_cpu_spec@l
218 lwz r4,0(r4)
219 add r4,r4,r3
220 lwz r5,CPU_SPEC_SETUP(r4)
	/* Test the un-relocated pointer for NULL; the add below does
	 * not disturb CR0, so the beqlr still sees that result. */
221 cmpi 0,r5,0
222 add r5,r5,r3
223 beqlr
224 mtctr r5
225 bctr
226
227#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

229/* This gets called by via-pmu.c to switch the PLL selection
230 * on 750fx CPU. This function should really be moved to some
231 * other place (as most of the cpufreq code in via-pmu
 *
 * r3 = PLL selector (0 = PLL0, non-zero = PLL1).  Runs with
 * interrupts disabled for the duration of the switch.
232 */
233_GLOBAL(low_choose_750fx_pll)
234 /* Clear MSR:EE */
235 mfmsr r7
236 rlwinm r0,r7,0,17,15
237 mtmsr r0
238
239 /* If switching to PLL1, disable HID0:BTIC */
240 cmplwi cr0,r3,0
241 beq 1f
242 mfspr r5,SPRN_HID0
243 rlwinm r5,r5,0,27,25
244 sync
245 mtspr SPRN_HID0,r5
246 isync
247 sync
248
2491:
250 /* Calc new HID1 value */
251 mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
252 rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
253 rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? */
254 or r4,r4,r5
255 mtspr SPRN_HID1,r4
256
257 /* Store new HID1 image */
	/* Keep the idle code's per-CPU nap_save_hid1[] in sync so a
	 * NAP wakeup restores the PLL selection we just programmed. */
258 rlwinm r6,r1,0,0,18
259 lwz r6,TI_CPU(r6)
260 slwi r6,r6,2
261 addis r6,r6,nap_save_hid1@ha
262 stw r4,nap_save_hid1@l(r6)
263
264 /* If switching to PLL0, enable HID0:BTIC */
265 cmplwi cr0,r3,0
266 bne 1f
267 mfspr r5,SPRN_HID0
268 ori r5,r5,HID0_BTIC
269 sync
270 mtspr SPRN_HID0,r5
271 isync
272 sync
273
2741:
275 /* Return */
276 mtmsr r7
277 blr
278
	/* low_choose_7447a_dfs(r3): set/clear the 7447A dynamic frequency
	 * switch (HID1 bit 9) with interrupts disabled. */
279_GLOBAL(low_choose_7447a_dfs)
280 /* Clear MSR:EE */
281 mfmsr r7
282 rlwinm r0,r7,0,17,15
283 mtmsr r0
284
285 /* Calc new HID1 value */
286 mfspr r4,SPRN_HID1
287 insrwi r4,r3,1,9 /* insert parameter into bit 9 */
288 sync
289 mtspr SPRN_HID1,r4
290 sync
291 isync
292
293 /* Return */
294 mtmsr r7
295 blr

297#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
298
299/*
300 * complement mask on the msr then "or" some values on.
301 * _nmask_and_or_msr(nmask, value_to_or)
 *
 * new MSR = (MSR & ~r3) | r4
302 */
303_GLOBAL(_nmask_and_or_msr)
304 mfmsr r0 /* Get current msr */
305 andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
306 or r0,r0,r4 /* Or on the bits in r4 (second parm) */
307 SYNC /* Some chip revs have problems here... */
308 mtmsr r0 /* Update machine state */
309 isync
310 blr /* Done */
311
312
313/*
314 * Flush MMU TLB
 *
 * Whole-TLB invalidate; the per-family paths below differ because
 * 40x/44x/FSL Book-E have no broadcast tlbia and hash-MMU SMP needs
 * mmu_hash_lock held around the flush.
315 */
316_GLOBAL(_tlbia)
317#if defined(CONFIG_40x)
318 sync /* Flush to memory before changing mapping */
319 tlbia
320 isync /* Flush shadow TLB */
321#elif defined(CONFIG_44x)
322 li r3,0
323 sync
324
325 /* Load high watermark */
326 lis r4,tlb_44x_hwater@ha
327 lwz r5,tlb_44x_hwater@l(r4)
328
	/* Invalidate entries 0..hwater by writing an invalid PAGEID. */
3291: tlbwe r3,r3,PPC44x_TLB_PAGEID
330 addi r3,r3,1
331 cmpw 0,r3,r5
332 ble 1b
333
334 isync
335#elif defined(CONFIG_FSL_BOOKE)
336 /* Invalidate all entries in TLB0 */
337 li r3, 0x04
338 tlbivax 0,3
339 /* Invalidate all entries in TLB1 */
340 li r3, 0x0c
341 tlbivax 0,3
342 /* Invalidate all entries in TLB2 */
343 li r3, 0x14
344 tlbivax 0,3
345 /* Invalidate all entries in TLB3 */
346 li r3, 0x1c
347 tlbivax 0,3
348 msync
349#ifdef CONFIG_SMP
350 tlbsync
351#endif /* CONFIG_SMP */
352#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
353#if defined(CONFIG_SMP)
	/* Hash MMU, SMP: take mmu_hash_lock (real mode, EE and DR off)
	 * before flushing, so we don't race the hash-fault handlers. */
354 rlwinm r8,r1,0,0,18
355 lwz r8,TI_CPU(r8)
356 oris r8,r8,10
357 mfmsr r10
358 SYNC
359 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
360 rlwinm r0,r0,0,28,26 /* clear DR */
361 mtmsr r0
362 SYNC_601
363 isync
364 lis r9,mmu_hash_lock@h
365 ori r9,r9,mmu_hash_lock@l
366 tophys(r9,r9)
36710: lwarx r7,0,r9
368 cmpwi 0,r7,0
369 bne- 10b
370 stwcx. r8,0,r9
371 bne- 10b
372 sync
373 tlbia
374 sync
375 TLBSYNC
376 li r0,0
377 stw r0,0(r9) /* clear mmu_hash_lock */
378 mtmsr r10
379 SYNC_601
380 isync
381#else /* CONFIG_SMP */
382 sync
383 tlbia
384 sync
385#endif /* CONFIG_SMP */
386#endif /* ! defined(CONFIG_40x) */
387 blr
388
389/*
390 * Flush MMU TLB for a particular address
 *
 * r3 = effective address whose TLB entry should be invalidated.
391 */
392_GLOBAL(_tlbie)
393#if defined(CONFIG_40x)
394 tlbsx. r3, 0, r3
395 bne 10f
396 sync
397 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
398 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
399 * the TLB entry. */
400 tlbwe r3, r3, TLB_TAG
401 isync
40210:
403#elif defined(CONFIG_44x)
404 mfspr r4,SPRN_MMUCR
405 mfspr r5,SPRN_PID /* Get PID */
406 rlwimi r4,r5,0,24,31 /* Set TID */
407 mtspr SPRN_MMUCR,r4
408
409 tlbsx. r3, 0, r3
410 bne 10f
411 sync
412 /* There are only 64 TLB entries, so r3 < 64,
413 * which means bit 22, is clear. Since 22 is
414 * the V bit in the TLB_PAGEID, loading this
415 * value will invalidate the TLB entry.
416 */
417 tlbwe r3, r3, PPC44x_TLB_PAGEID
418 isync
41910:
420#elif defined(CONFIG_FSL_BOOKE)
	/* Page-align the EA and invalidate it in all four TLB arrays. */
421 rlwinm r4, r3, 0, 0, 19
422 ori r5, r4, 0x08 /* TLBSEL = 1 */
423 ori r6, r4, 0x10 /* TLBSEL = 2 */
424 ori r7, r4, 0x18 /* TLBSEL = 3 */
425 tlbivax 0, r4
426 tlbivax 0, r5
427 tlbivax 0, r6
428 tlbivax 0, r7
429 msync
430#if defined(CONFIG_SMP)
431 tlbsync
432#endif /* CONFIG_SMP */
433#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
434#if defined(CONFIG_SMP)
	/* Hash MMU, SMP: same mmu_hash_lock protocol as _tlbia above. */
435 rlwinm r8,r1,0,0,18
436 lwz r8,TI_CPU(r8)
437 oris r8,r8,11
438 mfmsr r10
439 SYNC
440 rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
441 rlwinm r0,r0,0,28,26 /* clear DR */
442 mtmsr r0
443 SYNC_601
444 isync
445 lis r9,mmu_hash_lock@h
446 ori r9,r9,mmu_hash_lock@l
447 tophys(r9,r9)
44810: lwarx r7,0,r9
449 cmpwi 0,r7,0
450 bne- 10b
451 stwcx. r8,0,r9
452 bne- 10b
453 eieio
454 tlbie r3
455 sync
456 TLBSYNC
457 li r0,0
458 stw r0,0(r9) /* clear mmu_hash_lock */
459 mtmsr r10
460 SYNC_601
461 isync
462#else /* CONFIG_SMP */
463 tlbie r3
464 sync
465#endif /* CONFIG_SMP */
466#endif /* ! CONFIG_40x */
467 blr
468
469/*
470 * Flush instruction cache.
471 * This is a no-op on the 601.
 *
 * Per-family paths: 8xx uses IC_CST, 4xx uses iccci, FSL Book-E uses
 * the L1CSRn flash-invalidate bits, classic 6xx uses HID0_ICFI.
472 */
473_GLOBAL(flush_instruction_cache)
474#if defined(CONFIG_8xx)
475 isync
476 lis r5, IDC_INVALL@h
477 mtspr SPRN_IC_CST, r5
478#elif defined(CONFIG_4xx)
479#ifdef CONFIG_403GCX
480 li r3, 512
481 mtctr r3
482 lis r4, KERNELBASE@h
4831: iccci 0, r4
484 addi r4, r4, 16
485 bdnz 1b
486#else
487 lis r3, KERNELBASE@h
488 iccci 0,r3
489#endif
/* Fix: use defined() like every other guard in this chain — a bare
 * macro in #elif is 0 when undefined but breaks if the macro is
 * defined with an empty expansion, and trips -Wundef. */
490#elif defined(CONFIG_FSL_BOOKE)
491BEGIN_FTR_SECTION
	/* Unified cache (e200): flash-invalidate via L1CSR0. */
492 mfspr r3,SPRN_L1CSR0
493 ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
494 /* msync; isync recommended here */
495 mtspr SPRN_L1CSR0,r3
496 isync
497 blr
498END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	/* Split caches (e500): flash-invalidate the I-cache via L1CSR1. */
499 mfspr r3,SPRN_L1CSR1
500 ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
501 mtspr SPRN_L1CSR1,r3
502#else
503 mfspr r3,SPRN_PVR
504 rlwinm r3,r3,16,16,31
505 cmpwi 0,r3,1
506 beqlr /* for 601, do nothing */
507 /* 603/604 processor - use invalidate-all bit in HID0 */
508 mfspr r3,SPRN_HID0
509 ori r3,r3,HID0_ICFI
510 mtspr SPRN_HID0,r3
511#endif /* CONFIG_8xx/4xx */
512 isync
513 blr
514
515/*
516 * Write any modified data cache blocks out to memory
517 * and invalidate the corresponding instruction cache blocks.
518 * This is a no-op on the 601.
519 *
520 * flush_icache_range(unsigned long start, unsigned long stop)
521 */
522_GLOBAL(flush_icache_range)
523BEGIN_FTR_SECTION
524 blr /* for 601, do nothing */
525END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	/* Align start down to a cache line and compute the line count. */
526 li r5,L1_CACHE_BYTES-1
527 andc r3,r3,r5
528 subf r4,r3,r4
529 add r4,r4,r5
530 srwi. r4,r4,L1_CACHE_SHIFT
531 beqlr
532 mtctr r4
533 mr r6,r3
5341: dcbst 0,r3
535 addi r3,r3,L1_CACHE_BYTES
536 bdnz 1b
537 sync /* wait for dcbst's to get to ram */
538 mtctr r4
5392: icbi 0,r6
540 addi r6,r6,L1_CACHE_BYTES
541 bdnz 2b
542 sync /* additional sync needed on g4 */
543 isync
544 blr
545/*
546 * Write any modified data cache blocks out to memory.
547 * Does not invalidate the corresponding cache lines (especially for
548 * any corresponding instruction cache).
549 *
550 * clean_dcache_range(unsigned long start, unsigned long stop)
551 */
552_GLOBAL(clean_dcache_range)
	/* Align start down to a cache line and compute the line count. */
553 li r5,L1_CACHE_BYTES-1
554 andc r3,r3,r5
555 subf r4,r3,r4
556 add r4,r4,r5
557 srwi. r4,r4,L1_CACHE_SHIFT
558 beqlr
559 mtctr r4
560
5611: dcbst 0,r3
562 addi r3,r3,L1_CACHE_BYTES
563 bdnz 1b
564 sync /* wait for dcbst's to get to ram */
565 blr
566
567/*
568 * Write any modified data cache blocks out to memory and invalidate them.
569 * Does not invalidate the corresponding instruction cache blocks.
570 *
571 * flush_dcache_range(unsigned long start, unsigned long stop)
572 */
573_GLOBAL(flush_dcache_range)
	/* Align start down to a cache line and compute the line count. */
574 li r5,L1_CACHE_BYTES-1
575 andc r3,r3,r5
576 subf r4,r3,r4
577 add r4,r4,r5
578 srwi. r4,r4,L1_CACHE_SHIFT
579 beqlr
580 mtctr r4
581
5821: dcbf 0,r3
583 addi r3,r3,L1_CACHE_BYTES
584 bdnz 1b
585 sync /* wait for dcbf's to get to ram */
586 blr
587
588/*
589 * Like above, but invalidate the D-cache. This is used by the 8xx
590 * to invalidate the cache so the PPC core doesn't get stale data
591 * from the CPM (no cache snooping here :-).
 *
 * Discards (does not write back) any modified data in the range.
592 *
593 * invalidate_dcache_range(unsigned long start, unsigned long stop)
594 */
595_GLOBAL(invalidate_dcache_range)
596 li r5,L1_CACHE_BYTES-1
597 andc r3,r3,r5
598 subf r4,r3,r4
599 add r4,r4,r5
600 srwi. r4,r4,L1_CACHE_SHIFT
601 beqlr
602 mtctr r4
603
6041: dcbi 0,r3
605 addi r3,r3,L1_CACHE_BYTES
606 bdnz 1b
607 sync /* wait for dcbi's to get to ram */
608 blr
609
610#ifdef CONFIG_NOT_COHERENT_CACHE
611/*
612 * 40x cores have 8K or 16K dcache and 32 byte line size.
613 * 44x has a 32K dcache and 32 byte line size.
614 * 8xx has 1, 2, 4, 8K variants.
615 * For now, cover the worst case of the 44x.
616 * Must be called with external interrupts disabled.
 *
 * Flushes by displacement: reading 2x the cache size of kernel text
 * evicts (and thus writes back) every dirty line.
617 */
618#define CACHE_NWAYS 64
619#define CACHE_NLINES 16
620
621_GLOBAL(flush_dcache_all)
622 li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
623 mtctr r4
624 lis r5, KERNELBASE@h
6251: lwz r3, 0(r5) /* Load one word from every line */
626 addi r5, r5, L1_CACHE_BYTES
627 bdnz 1b
628 blr
629#endif /* CONFIG_NOT_COHERENT_CACHE */
630
631/*
632 * Flush a particular page from the data cache to RAM.
633 * Note: this is necessary because the instruction cache does *not*
634 * snoop from the data cache.
635 * This is a no-op on the 601 which has a unified cache.
636 *
637 * void __flush_dcache_icache(void *page)
638 */
639_GLOBAL(__flush_dcache_icache)
640BEGIN_FTR_SECTION
641 blr /* for 601, do nothing */
642END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
643 rlwinm r3,r3,0,0,19 /* Get page base address */
644 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
645 mtctr r4
646 mr r6,r3
6470: dcbst 0,r3 /* Write line to ram */
648 addi r3,r3,L1_CACHE_BYTES
649 bdnz 0b
650 sync
	/* Second pass: invalidate the matching icache lines. */
651 mtctr r4
6521: icbi 0,r6
653 addi r6,r6,L1_CACHE_BYTES
654 bdnz 1b
655 sync
656 isync
657 blr
658
659/*
660 * Flush a particular page from the data cache to RAM, identified
661 * by its physical address. We turn off the MMU so we can just use
662 * the physical address (this may be a highmem page without a kernel
663 * mapping).
664 *
665 * void __flush_dcache_icache_phys(unsigned long physaddr)
666 */
667_GLOBAL(__flush_dcache_icache_phys)
668BEGIN_FTR_SECTION
669 blr /* for 601, do nothing */
670END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	/* Disable data translation so r3 is used as a physical address. */
671 mfmsr r10
672 rlwinm r0,r10,0,28,26 /* clear DR */
673 mtmsr r0
674 isync
675 rlwinm r3,r3,0,0,19 /* Get page base address */
676 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
677 mtctr r4
678 mr r6,r3
6790: dcbst 0,r3 /* Write line to ram */
680 addi r3,r3,L1_CACHE_BYTES
681 bdnz 0b
682 sync
683 mtctr r4
6841: icbi 0,r6
685 addi r6,r6,L1_CACHE_BYTES
686 bdnz 1b
687 sync
688 mtmsr r10 /* restore DR */
689 isync
690 blr
691
692/*
693 * Clear pages using the dcbz instruction, which doesn't cause any
694 * memory traffic (except to write out any cache lines which get
695 * displaced). This only works on cacheable memory.
696 *
697 * void clear_pages(void *page, int order) ;
698 */
699_GLOBAL(clear_pages)
	/* loop count = lines-per-page << order */
700 li r0,4096/L1_CACHE_BYTES
701 slw r0,r0,r4
702 mtctr r0
703#ifdef CONFIG_8xx
	/* 8xx: no usable dcbz here; store 16 bytes of zeros per
	 * (16-byte) cache line instead. */
704 li r4, 0
7051: stw r4, 0(r3)
706 stw r4, 4(r3)
707 stw r4, 8(r3)
708 stw r4, 12(r3)
709#else
7101: dcbz 0,r3
711#endif
712 addi r3,r3,L1_CACHE_BYTES
713 bdnz 1b
714 blr
715
716/*
717 * Copy a whole page. We use the dcbz instruction on the destination
718 * to reduce memory traffic (it eliminates the unnecessary reads of
719 * the destination into cache). This requires that the destination
720 * is cacheable.
721 */
	/* Copies 16 bytes using pre-decremented pointers in r3/r4;
	 * the lwzu/stwu update form advances both by 16 per use. */
722#define COPY_16_BYTES \
723 lwz r6,4(r4); \
724 lwz r7,8(r4); \
725 lwz r8,12(r4); \
726 lwzu r9,16(r4); \
727 stw r6,4(r3); \
728 stw r7,8(r3); \
729 stw r8,12(r3); \
730 stwu r9,16(r3)

732_GLOBAL(copy_page)
733 addi r3,r3,-4
734 addi r4,r4,-4

736#ifdef CONFIG_8xx
737 /* don't use prefetch on 8xx */
738 li r0,4096/L1_CACHE_BYTES
739 mtctr r0
7401: COPY_16_BYTES
741 bdnz 1b
742 blr

744#else /* not 8xx, we can prefetch */
745 li r5,4

747#if MAX_COPY_PREFETCH > 1
	/* Prime the prefetch: touch the first MAX_COPY_PREFETCH lines
	 * of the source before the main copy loop starts. */
748 li r0,MAX_COPY_PREFETCH
749 li r11,4
750 mtctr r0
75111: dcbt r11,r4
752 addi r11,r11,L1_CACHE_BYTES
753 bdnz 11b
754#else /* MAX_COPY_PREFETCH == 1 */
755 dcbt r5,r4
756 li r11,L1_CACHE_BYTES+4
757#endif /* MAX_COPY_PREFETCH */
758 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	/* cr0.eq tracks which pass we are on: cleared now, set for the
	 * final tail pass (no further prefetch ahead of the end). */
759 crclr 4*cr0+eq
7602:
761 mtctr r0
7621:
763 dcbt r11,r4
764 dcbz r5,r3
765 COPY_16_BYTES
766#if L1_CACHE_BYTES >= 32
767 COPY_16_BYTES
768#if L1_CACHE_BYTES >= 64
769 COPY_16_BYTES
770 COPY_16_BYTES
771#if L1_CACHE_BYTES >= 128
772 COPY_16_BYTES
773 COPY_16_BYTES
774 COPY_16_BYTES
775 COPY_16_BYTES
776#endif
777#endif
778#endif
779 bdnz 1b
780 beqlr
781 crnot 4*cr0+eq,4*cr0+eq
782 li r0,MAX_COPY_PREFETCH
783 li r11,4
784 b 2b
785#endif /* CONFIG_8xx */
786
787/*
788 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
789 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
790 */
791_GLOBAL(atomic_clear_mask)
79210: lwarx r5,0,r4
793 andc r5,r5,r3
794 PPC405_ERR77(0,r4)
795 stwcx. r5,0,r4
796 bne- 10b
797 blr
798_GLOBAL(atomic_set_mask)
79910: lwarx r5,0,r4
800 or r5,r5,r3
801 PPC405_ERR77(0,r4)
802 stwcx. r5,0,r4
803 bne- 10b
804 blr
805
806/*
807 * I/O string operations
808 *
809 * insb(port, buf, len)
810 * outsb(port, buf, len)
811 * insw(port, buf, len)
812 * outsw(port, buf, len)
813 * insl(port, buf, len)
814 * outsl(port, buf, len)
815 * insw_ns(port, buf, len)
816 * outsw_ns(port, buf, len)
817 * insl_ns(port, buf, len)
818 * outsl_ns(port, buf, len)
819 *
820 * The *_ns versions don't do byte-swapping.
821 */
822_GLOBAL(_insb)
823 cmpwi 0,r5,0
824 mtctr r5
825 subi r4,r4,1
826 blelr-
82700: lbz r5,0(r3)
828 eieio
829 stbu r5,1(r4)
830 bdnz 00b
831 blr
832
833_GLOBAL(_outsb)
834 cmpwi 0,r5,0
835 mtctr r5
836 subi r4,r4,1
837 blelr-
83800: lbzu r5,1(r4)
839 stb r5,0(r3)
840 eieio
841 bdnz 00b
842 blr
843
844_GLOBAL(_insw)
845 cmpwi 0,r5,0
846 mtctr r5
847 subi r4,r4,2
848 blelr-
84900: lhbrx r5,0,r3
850 eieio
851 sthu r5,2(r4)
852 bdnz 00b
853 blr
854
855_GLOBAL(_outsw)
856 cmpwi 0,r5,0
857 mtctr r5
858 subi r4,r4,2
859 blelr-
86000: lhzu r5,2(r4)
861 eieio
862 sthbrx r5,0,r3
863 bdnz 00b
864 blr
865
866_GLOBAL(_insl)
867 cmpwi 0,r5,0
868 mtctr r5
869 subi r4,r4,4
870 blelr-
87100: lwbrx r5,0,r3
872 eieio
873 stwu r5,4(r4)
874 bdnz 00b
875 blr
876
877_GLOBAL(_outsl)
878 cmpwi 0,r5,0
879 mtctr r5
880 subi r4,r4,4
881 blelr-
88200: lwzu r5,4(r4)
883 stwbrx r5,0,r3
884 eieio
885 bdnz 00b
886 blr
887
888_GLOBAL(__ide_mm_insw)
889_GLOBAL(_insw_ns)
890 cmpwi 0,r5,0
891 mtctr r5
892 subi r4,r4,2
893 blelr-
89400: lhz r5,0(r3)
895 eieio
896 sthu r5,2(r4)
897 bdnz 00b
898 blr
899
900_GLOBAL(__ide_mm_outsw)
901_GLOBAL(_outsw_ns)
902 cmpwi 0,r5,0
903 mtctr r5
904 subi r4,r4,2
905 blelr-
90600: lhzu r5,2(r4)
907 sth r5,0(r3)
908 eieio
909 bdnz 00b
910 blr
911
912_GLOBAL(__ide_mm_insl)
913_GLOBAL(_insl_ns)
914 cmpwi 0,r5,0
915 mtctr r5
916 subi r4,r4,4
917 blelr-
91800: lwz r5,0(r3)
919 eieio
920 stwu r5,4(r4)
921 bdnz 00b
922 blr
923
924_GLOBAL(__ide_mm_outsl)
925_GLOBAL(_outsl_ns)
926 cmpwi 0,r5,0
927 mtctr r5
928 subi r4,r4,4
929 blelr-
93000: lwzu r5,4(r4)
931 stw r5,0(r3)
932 eieio
933 bdnz 00b
934 blr
935
936/*
937 * Extended precision shifts.
938 *
939 * Updated to be valid for shift counts from 0 to 63 inclusive.
940 * -- Gabriel
941 *
942 * R3/R4 has 64 bit value
943 * R5 has shift count
944 * result in R3/R4
945 *
946 * ashrdi3: arithmetic right shift (sign propagation)
947 * lshrdi3: logical right shift
948 * ashldi3: left shift
949 */
950_GLOBAL(__ashrdi3)
951 subfic r6,r5,32
952 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
953 addi r7,r5,32 # could be xori, or addi with -32
954 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
955 rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
956 sraw r7,r3,r7 # t2 = MSW >> (count-32)
957 or r4,r4,r6 # LSW |= t1
958 slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
959 sraw r3,r3,r5 # MSW = MSW >> count
960 or r4,r4,r7 # LSW |= t2
961 blr
962
963_GLOBAL(__ashldi3)
964 subfic r6,r5,32
965 slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
966 addi r7,r5,32 # could be xori, or addi with -32
967 srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
968 slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
969 or r3,r3,r6 # MSW |= t1
970 slw r4,r4,r5 # LSW = LSW << count
971 or r3,r3,r7 # MSW |= t2
972 blr
973
974_GLOBAL(__lshrdi3)
975 subfic r6,r5,32
976 srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
977 addi r7,r5,32 # could be xori, or addi with -32
978 slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
979 srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
980 or r4,r4,r6 # LSW |= t1
981 srw r3,r3,r5 # MSW = MSW >> count
982 or r4,r4,r7 # LSW |= t2
983 blr
984
985_GLOBAL(abs)
986 srawi r4,r3,31
987 xor r3,r3,r4
988 sub r3,r3,r4
989 blr
990
991_GLOBAL(_get_SP)
992 mr r3,r1 /* Close enough */
993 blr
994
995/*
996 * Create a kernel thread
997 * kernel_thread(fn, arg, flags)
998 */
999_GLOBAL(kernel_thread)
1000 stwu r1,-16(r1)
1001 stw r30,8(r1)
1002 stw r31,12(r1)
1003 mr r30,r3 /* function */
1004 mr r31,r4 /* argument */
1005 ori r3,r5,CLONE_VM /* flags */
1006 oris r3,r3,CLONE_UNTRACED>>16
1007 li r4,0 /* new sp (unused) */
1008 li r0,__NR_clone
1009 sc
1010 cmpwi 0,r3,0 /* parent or child? */
1011 bne 1f /* return if parent */
1012 li r0,0 /* make top-level stack frame */
1013 stwu r0,-16(r1)
1014 mtlr r30 /* fn addr in lr */
1015 mr r3,r31 /* load arg and call fn */
1016 PPC440EP_ERR42
1017 blrl
1018 li r0,__NR_exit /* exit if function returns */
1019 li r3,0
1020 sc
10211: lwz r30,8(r1)
1022 lwz r31,12(r1)
1023 addi r1,r1,16
1024 blr
1025
1026_GLOBAL(execve)
1027 li r0,__NR_execve
1028 sc
1029 bnslr
1030 neg r3,r3
1031 blr
1032
1033/*
1034 * This routine is just here to keep GCC happy - sigh...
1035 */
1036_GLOBAL(__main)
1037 blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
new file mode 100644
index 000000000000..b3e95ff0dba0
--- /dev/null
+++ b/arch/powerpc/kernel/misc_64.S
@@ -0,0 +1,880 @@
1/*
2 * arch/powerpc/kernel/misc64.S
3 *
4 * This file contains miscellaneous low-level functions.
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
8 * and Paul Mackerras.
9 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
10 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/sys.h>
21#include <asm/unistd.h>
22#include <asm/errno.h>
23#include <asm/processor.h>
24#include <asm/page.h>
25#include <asm/cache.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30
31 .text
32
33/*
34 * Returns (address we are running at) - (address we were linked at)
35 * for use before the text and data are mapped to KERNELBASE.
36 */
37
38_GLOBAL(reloc_offset)
39 mflr r0
40 bl 1f
411: mflr r3
42 LOADADDR(r4,1b)
43 subf r3,r4,r3
44 mtlr r0
45 blr
46
47/*
48 * add_reloc_offset(x) returns x + reloc_offset().
49 */
50_GLOBAL(add_reloc_offset)
51 mflr r0
52 bl 1f
531: mflr r5
54 LOADADDR(r4,1b)
55 subf r5,r4,r5
56 add r3,r3,r5
57 mtlr r0
58 blr
59
60_GLOBAL(get_msr)
61 mfmsr r3
62 blr
63
64_GLOBAL(get_dar)
65 mfdar r3
66 blr
67
68_GLOBAL(get_srr0)
69 mfsrr0 r3
70 blr
71
72_GLOBAL(get_srr1)
73 mfsrr1 r3
74 blr
75
76_GLOBAL(get_sp)
77 mr r3,r1
78 blr
79
80#ifdef CONFIG_IRQSTACKS
81_GLOBAL(call_do_softirq)
82 mflr r0
83 std r0,16(r1)
84 stdu r1,THREAD_SIZE-112(r3)
85 mr r1,r3
86 bl .__do_softirq
87 ld r1,0(r1)
88 ld r0,16(r1)
89 mtlr r0
90 blr
91
92_GLOBAL(call_handle_IRQ_event)
93 mflr r0
94 std r0,16(r1)
95 stdu r1,THREAD_SIZE-112(r6)
96 mr r1,r6
97 bl .handle_IRQ_event
98 ld r1,0(r1)
99 ld r0,16(r1)
100 mtlr r0
101 blr
102#endif /* CONFIG_IRQSTACKS */
103
104 /*
105 * To be called by C code which needs to do some operations with MMU
106 * disabled. Note that interrupts have to be disabled by the caller
107 * prior to calling us. The code called _MUST_ be in the RMO of course
108 * and part of the linear mapping as we don't attempt to translate the
109 * stack pointer at all. The function is called with the stack switched
110 * to this CPU emergency stack
111 *
112 * prototype is void *call_with_mmu_off(void *func, void *data);
113 *
114 * the called function is expected to be of the form
115 *
116 * void *called(void *data);
117 */
118_GLOBAL(call_with_mmu_off)
119 mflr r0 /* get link, save it on stackframe */
120 std r0,16(r1)
121 mr r1,r5 /* save old stack ptr */
122 ld r1,PACAEMERGSP(r13) /* get emerg. stack */
123 subi r1,r1,STACK_FRAME_OVERHEAD
124 std r0,16(r1) /* save link on emerg. stack */
125 std r5,0(r1) /* save old stack ptr in backchain */
126 ld r3,0(r3) /* get to real function ptr (assume same TOC) */
127 bl 2f /* we need LR to return, continue at label 2 */
128
129 ld r0,16(r1) /* we return here from the call, get LR and */
130 ld r1,0(r1) /* .. old stack ptr */
131 mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
132 mfmsr r4
133 ori r4,r4,MSR_IR|MSR_DR
134 mtspr SPRN_SRR1,r4
135 rfid
136
1372: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
138 mr r3,r4 /* get parameter */
139 mfmsr r0
140 ori r0,r0,MSR_IR|MSR_DR
141 xori r0,r0,MSR_IR|MSR_DR
142 mtspr SPRN_SRR1,r0
143 rfid
144
145
146 .section ".toc","aw"
147PPC64_CACHES:
148 .tc ppc64_caches[TC],ppc64_caches
149 .section ".text"
150
151/*
152 * Write any modified data cache blocks out to memory
153 * and invalidate the corresponding instruction cache blocks.
154 *
155 * flush_icache_range(unsigned long start, unsigned long stop)
156 *
157 * flush all bytes from start through stop-1 inclusive
158 */
159
160_KPROBE(__flush_icache_range)
161
162/*
163 * Flush the data cache to memory
164 *
165 * Different systems have different cache line sizes
166 * and in some cases i-cache and d-cache line sizes differ from
167 * each other.
168 */
169 ld r10,PPC64_CACHES@toc(r2)
170 lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
171 addi r5,r7,-1
172 andc r6,r3,r5 /* round low to line bdy */
173 subf r8,r6,r4 /* compute length */
174 add r8,r8,r5 /* ensure we get enough */
175 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
176 srw. r8,r8,r9 /* compute line count */
177 beqlr /* nothing to do? */
178 mtctr r8
1791: dcbst 0,r6
180 add r6,r6,r7
181 bdnz 1b
182 sync
183
184/* Now invalidate the instruction cache */
185
186 lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
187 addi r5,r7,-1
188 andc r6,r3,r5 /* round low to line bdy */
189 subf r8,r6,r4 /* compute length */
190 add r8,r8,r5
191 lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
192 srw. r8,r8,r9 /* compute line count */
193 beqlr /* nothing to do? */
194 mtctr r8
1952: icbi 0,r6
196 add r6,r6,r7
197 bdnz 2b
198 isync
199 blr
200 .previous .text
201/*
202 * Like above, but only do the D-cache.
203 *
204 * flush_dcache_range(unsigned long start, unsigned long stop)
205 *
206 * flush all bytes from start to stop-1 inclusive
207 */
208_GLOBAL(flush_dcache_range)
209
210/*
211 * Flush the data cache to memory
212 *
213 * Different systems have different cache line sizes
214 */
215 ld r10,PPC64_CACHES@toc(r2)
216 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
217 addi r5,r7,-1
218 andc r6,r3,r5 /* round low to line bdy */
219 subf r8,r6,r4 /* compute length */
220 add r8,r8,r5 /* ensure we get enough */
221 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
222 srw. r8,r8,r9 /* compute line count */
223 beqlr /* nothing to do? */
224 mtctr r8
2250: dcbst 0,r6
226 add r6,r6,r7
227 bdnz 0b
228 sync
229 blr
230
231/*
232 * Like above, but works on non-mapped physical addresses.
233 * Use only for non-LPAR setups ! It also assumes real mode
234 * is cacheable. Used for flushing out the DART before using
235 * it as uncacheable memory
236 *
237 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
238 *
239 * flush all bytes from start to stop-1 inclusive
240 */
241_GLOBAL(flush_dcache_phys_range)
242 ld r10,PPC64_CACHES@toc(r2)
243 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
244 addi r5,r7,-1
245 andc r6,r3,r5 /* round low to line bdy */
246 subf r8,r6,r4 /* compute length */
247 add r8,r8,r5 /* ensure we get enough */
248 lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
249 srw. r8,r8,r9 /* compute line count */
250 beqlr /* nothing to do? */
251 mfmsr r5 /* Disable MMU Data Relocation */
252 ori r0,r5,MSR_DR
253 xori r0,r0,MSR_DR
254 sync
255 mtmsr r0
256 sync
257 isync
258 mtctr r8
2590: dcbst 0,r6
260 add r6,r6,r7
261 bdnz 0b
262 sync
263 isync
264 mtmsr r5 /* Re-enable MMU Data Relocation */
265 sync
266 isync
267 blr
268
269_GLOBAL(flush_inval_dcache_range)
270 ld r10,PPC64_CACHES@toc(r2)
271 lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
272 addi r5,r7,-1
273 andc r6,r3,r5 /* round low to line bdy */
274 subf r8,r6,r4 /* compute length */
275 add r8,r8,r5 /* ensure we get enough */
276 lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
277 srw. r8,r8,r9 /* compute line count */
278 beqlr /* nothing to do? */
279 sync
280 isync
281 mtctr r8
2820: dcbf 0,r6
283 add r6,r6,r7
284 bdnz 0b
285 sync
286 isync
287 blr
288
289
290/*
291 * Flush a particular page from the data cache to RAM.
292 * Note: this is necessary because the instruction cache does *not*
293 * snoop from the data cache.
294 *
295 * void __flush_dcache_icache(void *page)
296 */
297_GLOBAL(__flush_dcache_icache)
298/*
299 * Flush the data cache to memory
300 *
301 * Different systems have different cache line sizes
302 */
303
304/* Flush the dcache */
305 ld r7,PPC64_CACHES@toc(r2)
306 clrrdi r3,r3,PAGE_SHIFT /* Page align */
307 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
308 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
309 mr r6,r3
310 mtctr r4
3110: dcbst 0,r6
312 add r6,r6,r5
313 bdnz 0b
314 sync
315
316/* Now invalidate the icache */
317
318 lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
319 lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
320 mtctr r4
3211: icbi 0,r3
322 add r3,r3,r5
323 bdnz 1b
324 isync
325 blr
326
327/*
328 * I/O string operations
329 *
330 * insb(port, buf, len)
331 * outsb(port, buf, len)
332 * insw(port, buf, len)
333 * outsw(port, buf, len)
334 * insl(port, buf, len)
335 * outsl(port, buf, len)
336 * insw_ns(port, buf, len)
337 * outsw_ns(port, buf, len)
338 * insl_ns(port, buf, len)
339 * outsl_ns(port, buf, len)
340 *
341 * The *_ns versions don't do byte-swapping.
342 */
343_GLOBAL(_insb)
344 cmpwi 0,r5,0
345 mtctr r5
346 subi r4,r4,1
347 blelr-
34800: lbz r5,0(r3)
349 eieio
350 stbu r5,1(r4)
351 bdnz 00b
352 twi 0,r5,0
353 isync
354 blr
355
356_GLOBAL(_outsb)
357 cmpwi 0,r5,0
358 mtctr r5
359 subi r4,r4,1
360 blelr-
36100: lbzu r5,1(r4)
362 stb r5,0(r3)
363 bdnz 00b
364 sync
365 blr
366
367_GLOBAL(_insw)
368 cmpwi 0,r5,0
369 mtctr r5
370 subi r4,r4,2
371 blelr-
37200: lhbrx r5,0,r3
373 eieio
374 sthu r5,2(r4)
375 bdnz 00b
376 twi 0,r5,0
377 isync
378 blr
379
380_GLOBAL(_outsw)
381 cmpwi 0,r5,0
382 mtctr r5
383 subi r4,r4,2
384 blelr-
38500: lhzu r5,2(r4)
386 sthbrx r5,0,r3
387 bdnz 00b
388 sync
389 blr
390
391_GLOBAL(_insl)
392 cmpwi 0,r5,0
393 mtctr r5
394 subi r4,r4,4
395 blelr-
39600: lwbrx r5,0,r3
397 eieio
398 stwu r5,4(r4)
399 bdnz 00b
400 twi 0,r5,0
401 isync
402 blr
403
404_GLOBAL(_outsl)
405 cmpwi 0,r5,0
406 mtctr r5
407 subi r4,r4,4
408 blelr-
40900: lwzu r5,4(r4)
410 stwbrx r5,0,r3
411 bdnz 00b
412 sync
413 blr
414
415/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
416_GLOBAL(_insw_ns)
417 cmpwi 0,r5,0
418 mtctr r5
419 subi r4,r4,2
420 blelr-
42100: lhz r5,0(r3)
422 eieio
423 sthu r5,2(r4)
424 bdnz 00b
425 twi 0,r5,0
426 isync
427 blr
428
429/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
430_GLOBAL(_outsw_ns)
431 cmpwi 0,r5,0
432 mtctr r5
433 subi r4,r4,2
434 blelr-
43500: lhzu r5,2(r4)
436 sth r5,0(r3)
437 bdnz 00b
438 sync
439 blr
440
441_GLOBAL(_insl_ns)
442 cmpwi 0,r5,0
443 mtctr r5
444 subi r4,r4,4
445 blelr-
44600: lwz r5,0(r3)
447 eieio
448 stwu r5,4(r4)
449 bdnz 00b
450 twi 0,r5,0
451 isync
452 blr
453
454_GLOBAL(_outsl_ns)
455 cmpwi 0,r5,0
456 mtctr r5
457 subi r4,r4,4
458 blelr-
45900: lwzu r5,4(r4)
460 stw r5,0(r3)
461 bdnz 00b
462 sync
463 blr
464
465/*
466 * identify_cpu and calls setup_cpu
467 * In: r3 = base of the cpu_specs array
468 * r4 = address of cur_cpu_spec
469 * r5 = relocation offset
470 */
471_GLOBAL(identify_cpu)
472 mfpvr r7
4731:
474 lwz r8,CPU_SPEC_PVR_MASK(r3)
475 and r8,r8,r7
476 lwz r9,CPU_SPEC_PVR_VALUE(r3)
477 cmplw 0,r9,r8
478 beq 1f
479 addi r3,r3,CPU_SPEC_ENTRY_SIZE
480 b 1b
4811:
482 sub r0,r3,r5
483 std r0,0(r4)
484 ld r4,CPU_SPEC_SETUP(r3)
485 add r4,r4,r5
486 ld r4,0(r4)
487 add r4,r4,r5
488 mtctr r4
489 /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
490 mr r4,r3
491 mr r3,r5
492 bctr
493
494/*
495 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
496 * and writes nop's over sections of code that don't apply for this cpu.
497 * r3 = data offset (not changed)
498 */
499_GLOBAL(do_cpu_ftr_fixups)
500 /* Get CPU 0 features */
501 LOADADDR(r6,cur_cpu_spec)
502 sub r6,r6,r3
503 ld r4,0(r6)
504 sub r4,r4,r3
505 ld r4,CPU_SPEC_FEATURES(r4)
506 /* Get the fixup table */
507 LOADADDR(r6,__start___ftr_fixup)
508 sub r6,r6,r3
509 LOADADDR(r7,__stop___ftr_fixup)
510 sub r7,r7,r3
511 /* Do the fixup */
5121: cmpld r6,r7
513 bgelr
514 addi r6,r6,32
515 ld r8,-32(r6) /* mask */
516 and r8,r8,r4
517 ld r9,-24(r6) /* value */
518 cmpld r8,r9
519 beq 1b
520 ld r8,-16(r6) /* section begin */
521 ld r9,-8(r6) /* section end */
522 subf. r9,r8,r9
523 beq 1b
524 /* write nops over the section of code */
525 /* todo: if large section, add a branch at the start of it */
526 srwi r9,r9,2
527 mtctr r9
528 sub r8,r8,r3
529 lis r0,0x60000000@h /* nop */
5303: stw r0,0(r8)
531 andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
532 beq 2f
533 dcbst 0,r8 /* suboptimal, but simpler */
534 sync
535 icbi 0,r8
5362: addi r8,r8,4
537 bdnz 3b
538 sync /* additional sync needed on g4 */
539 isync
540 b 1b
541
542#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
543/*
544 * Do an IO access in real mode
545 */
546_GLOBAL(real_readb)
547 mfmsr r7
548 ori r0,r7,MSR_DR
549 xori r0,r0,MSR_DR
550 sync
551 mtmsrd r0
552 sync
553 isync
554 mfspr r6,SPRN_HID4
555 rldicl r5,r6,32,0
556 ori r5,r5,0x100
557 rldicl r5,r5,32,0
558 sync
559 mtspr SPRN_HID4,r5
560 isync
561 slbia
562 isync
563 lbz r3,0(r3)
564 sync
565 mtspr SPRN_HID4,r6
566 isync
567 slbia
568 isync
569 mtmsrd r7
570 sync
571 isync
572 blr
573
574 /*
575 * Do an IO access in real mode
576 */
577_GLOBAL(real_writeb)
578 mfmsr r7
579 ori r0,r7,MSR_DR
580 xori r0,r0,MSR_DR
581 sync
582 mtmsrd r0
583 sync
584 isync
585 mfspr r6,SPRN_HID4
586 rldicl r5,r6,32,0
587 ori r5,r5,0x100
588 rldicl r5,r5,32,0
589 sync
590 mtspr SPRN_HID4,r5
591 isync
592 slbia
593 isync
594 stb r3,0(r4)
595 sync
596 mtspr SPRN_HID4,r6
597 isync
598 slbia
599 isync
600 mtmsrd r7
601 sync
602 isync
603 blr
604#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
605
606/*
607 * Create a kernel thread
608 * kernel_thread(fn, arg, flags)
609 */
610_GLOBAL(kernel_thread)
611 std r29,-24(r1)
612 std r30,-16(r1)
613 stdu r1,-STACK_FRAME_OVERHEAD(r1)
614 mr r29,r3
615 mr r30,r4
616 ori r3,r5,CLONE_VM /* flags */
617 oris r3,r3,(CLONE_UNTRACED>>16)
618 li r4,0 /* new sp (unused) */
619 li r0,__NR_clone
620 sc
621 cmpdi 0,r3,0 /* parent or child? */
622 bne 1f /* return if parent */
623 li r0,0
624 stdu r0,-STACK_FRAME_OVERHEAD(r1)
625 ld r2,8(r29)
626 ld r29,0(r29)
627 mtlr r29 /* fn addr in lr */
628 mr r3,r30 /* load arg and call fn */
629 blrl
630 li r0,__NR_exit /* exit after child exits */
631 li r3,0
632 sc
6331: addi r1,r1,STACK_FRAME_OVERHEAD
634 ld r29,-24(r1)
635 ld r30,-16(r1)
636 blr
637
638/*
639 * disable_kernel_fp()
640 * Disable the FPU.
641 */
642_GLOBAL(disable_kernel_fp)
643 mfmsr r3
644 rldicl r0,r3,(63-MSR_FP_LG),1
645 rldicl r3,r0,(MSR_FP_LG+1),0
646 mtmsrd r3 /* disable use of fpu now */
647 isync
648 blr
649
650#ifdef CONFIG_ALTIVEC
651
652#if 0 /* this has no callers for now */
653/*
654 * disable_kernel_altivec()
655 * Disable the VMX.
656 */
657_GLOBAL(disable_kernel_altivec)
658 mfmsr r3
659 rldicl r0,r3,(63-MSR_VEC_LG),1
660 rldicl r3,r0,(MSR_VEC_LG+1),0
661 mtmsrd r3 /* disable use of VMX now */
662 isync
663 blr
664#endif /* 0 */
665
666/*
667 * giveup_altivec(tsk)
668 * Disable VMX for the task given as the argument,
669 * and save the vector registers in its thread_struct.
670 * Enables the VMX for use in the kernel on return.
671 */
672_GLOBAL(giveup_altivec)
673 mfmsr r5
674 oris r5,r5,MSR_VEC@h
675 mtmsrd r5 /* enable use of VMX now */
676 isync
677 cmpdi 0,r3,0
678 beqlr- /* if no previous owner, done */
679 addi r3,r3,THREAD /* want THREAD of task */
680 ld r5,PT_REGS(r3)
681 cmpdi 0,r5,0
682 SAVE_32VRS(0,r4,r3)
683 mfvscr vr0
684 li r4,THREAD_VSCR
685 stvx vr0,r4,r3
686 beq 1f
687 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
688 lis r3,MSR_VEC@h
689 andc r4,r4,r3 /* disable FP for previous task */
690 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
6911:
692#ifndef CONFIG_SMP
693 li r5,0
694 ld r4,last_task_used_altivec@got(r2)
695 std r5,0(r4)
696#endif /* CONFIG_SMP */
697 blr
698
699#endif /* CONFIG_ALTIVEC */
700
701_GLOBAL(__setup_cpu_power3)
702 blr
703
704_GLOBAL(execve)
705 li r0,__NR_execve
706 sc
707 bnslr
708 neg r3,r3
709 blr
710
711/* kexec_wait(phys_cpu)
712 *
713 * wait for the flag to change, indicating this kernel is going away but
714 * the slave code for the next one is at addresses 0 to 100.
715 *
716 * This is used by all slaves.
717 *
718 * Physical (hardware) cpu id should be in r3.
719 */
720_GLOBAL(kexec_wait)
721 bl 1f
7221: mflr r5
723 addi r5,r5,kexec_flag-1b
724
72599: HMT_LOW
726#ifdef CONFIG_KEXEC /* use no memory without kexec */
727 lwz r4,0(r5)
728 cmpwi 0,r4,0
729 bnea 0x60
730#endif
731 b 99b
732
733/* this can be in text because we won't change it until we are
734 * running in real anyways
735 */
736kexec_flag:
737 .long 0
738
739
740#ifdef CONFIG_KEXEC
741
742/* kexec_smp_wait(void)
743 *
744 * call with interrupts off
745 * note: this is a terminal routine, it does not save lr
746 *
747 * get phys id from paca
748 * set paca id to -1 to say we got here
749 * switch to real mode
750 * join other cpus in kexec_wait(phys_id)
751 */
752_GLOBAL(kexec_smp_wait)
753 lhz r3,PACAHWCPUID(r13)
754 li r4,-1
755 sth r4,PACAHWCPUID(r13) /* let others know we left */
756 bl real_mode
757 b .kexec_wait
758
759/*
760 * switch to real mode (turn mmu off)
761 * we use the early kernel trick that the hardware ignores bits
762 * 0 and 1 (big endian) of the effective address in real mode
763 *
764 * don't overwrite r3 here, it is live for kexec_wait above.
765 */
766real_mode: /* assume normal blr return */
7671: li r9,MSR_RI
768 li r10,MSR_DR|MSR_IR
769 mflr r11 /* return address to SRR0 */
770 mfmsr r12
771 andc r9,r12,r9
772 andc r10,r12,r10
773
774 mtmsrd r9,1
775 mtspr SPRN_SRR1,r10
776 mtspr SPRN_SRR0,r11
777 rfid
778
779
780/*
781 * kexec_sequence(newstack, start, image, control, clear_all())
782 *
783 * does the grungy work with stack switching and real mode switches
784 * also does simple calls to other code
785 */
786
787_GLOBAL(kexec_sequence)
788 mflr r0
789 std r0,16(r1)
790
791 /* switch stacks to newstack -- &kexec_stack.stack */
792 stdu r1,THREAD_SIZE-112(r3)
793 mr r1,r3
794
795 li r0,0
796 std r0,16(r1)
797
798 /* save regs for local vars on new stack.
799 * yes, we won't go back, but ...
800 */
801 std r31,-8(r1)
802 std r30,-16(r1)
803 std r29,-24(r1)
804 std r28,-32(r1)
805 std r27,-40(r1)
806 std r26,-48(r1)
807 std r25,-56(r1)
808
809 stdu r1,-112-64(r1)
810
811 /* save args into preserved regs */
812 mr r31,r3 /* newstack (both) */
813 mr r30,r4 /* start (real) */
814 mr r29,r5 /* image (virt) */
815 mr r28,r6 /* control, unused */
816 mr r27,r7 /* clear_all() fn desc */
817 mr r26,r8 /* spare */
818 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
819
820 /* disable interrupts, we are overwriting kernel data next */
821 mfmsr r3
822 rlwinm r3,r3,0,17,15
823 mtmsrd r3,1
824
825 /* copy dest pages, flush whole dest image */
826 mr r3,r29
827 bl .kexec_copy_flush /* (image) */
828
829 /* turn off mmu */
830 bl real_mode
831
832 /* clear out hardware hash page table and tlb */
833 ld r5,0(r27) /* deref function descriptor */
834 mtctr r5
835 bctrl /* ppc_md.hash_clear_all(void); */
836
837/*
838 * kexec image calling is:
839 * the first 0x100 bytes of the entry point are copied to 0
840 *
841 * all slaves branch to slave = 0x60 (absolute)
842 * slave(phys_cpu_id);
843 *
844 * master goes to start = entry point
845 * start(phys_cpu_id, start, 0);
846 *
847 *
848 * a wrapper is needed to call existing kernels, here is an approximate
849 * description of one method:
850 *
851 * v2: (2.6.10)
852 * start will be near the boot_block (maybe 0x100 bytes before it?)
853 * it will have a 0x60, which will b to boot_block, where it will wait
854 * and 0 will store phys into struct boot-block and load r3 from there,
855 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
856 *
857 * v1: (2.6.9)
858 * boot block will have all cpus scanning device tree to see if they
859 * are the boot cpu ?????
860 * other device tree differences (prop sizes, va vs pa, etc)...
861 */
862
863 /* copy 0x100 bytes starting at start to 0 */
864 li r3,0
865 mr r4,r30
866 li r5,0x100
867 li r6,0
868 bl .copy_and_flush /* (dest, src, copy limit, start offset) */
8691: /* assume normal blr return */
870
871 /* release other cpus to the new kernel secondary start at 0x60 */
872 mflr r5
873 li r6,1
874 stw r6,kexec_flag-1b(5)
875 mr r3,r25 # my phys cpu
876 mr r4,r30 # start, aka phys mem offset
877 mtlr 4
878 li r5,0
879 blr /* image->start(physid, image->start, 0); */
880#endif /* CONFIG_KEXEC */
diff --git a/arch/ppc64/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index 9f200f0f2ad5..7065e40e2f42 100644
--- a/arch/ppc64/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -4,6 +4,8 @@
4#include <linux/init.h> 4#include <linux/init.h>
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <linux/slab.h>
8
7#include <asm/errno.h> 9#include <asm/errno.h>
8#include <asm/of_device.h> 10#include <asm/of_device.h>
9 11
@@ -184,6 +186,7 @@ void of_release_dev(struct device *dev)
184 struct of_device *ofdev; 186 struct of_device *ofdev;
185 187
186 ofdev = to_of_device(dev); 188 ofdev = to_of_device(dev);
189 of_node_put(ofdev->node);
187 kfree(ofdev); 190 kfree(ofdev);
188} 191}
189 192
@@ -244,7 +247,7 @@ struct of_device* of_platform_device_create(struct device_node *np,
244 return NULL; 247 return NULL;
245 memset(dev, 0, sizeof(*dev)); 248 memset(dev, 0, sizeof(*dev));
246 249
247 dev->node = np; 250 dev->node = of_node_get(np);
248 dev->dma_mask = 0xffffffffUL; 251 dev->dma_mask = 0xffffffffUL;
249 dev->dev.dma_mask = &dev->dma_mask; 252 dev->dev.dma_mask = &dev->dma_mask;
250 dev->dev.parent = parent; 253 dev->dev.parent = parent;
@@ -261,7 +264,6 @@ struct of_device* of_platform_device_create(struct device_node *np,
261 return dev; 264 return dev;
262} 265}
263 266
264
265EXPORT_SYMBOL(of_match_device); 267EXPORT_SYMBOL(of_match_device);
266EXPORT_SYMBOL(of_platform_bus_type); 268EXPORT_SYMBOL(of_platform_bus_type);
267EXPORT_SYMBOL(of_register_driver); 269EXPORT_SYMBOL(of_register_driver);
diff --git a/arch/ppc64/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 63d9481c3ec2..2d333cc84082 100644
--- a/arch/ppc64/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -1,7 +1,10 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/pmc.c 2 * arch/powerpc/kernel/pmc.c
3 * 3 *
4 * Copyright (C) 2004 David Gibson, IBM Corporation. 4 * Copyright (C) 2004 David Gibson, IBM Corporation.
5 * Includes code formerly from arch/ppc/kernel/perfmon.c:
6 * Author: Andy Fleming
7 * Copyright (c) 2004 Freescale Semiconductor, Inc
5 * 8 *
6 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -17,6 +20,20 @@
17#include <asm/processor.h> 20#include <asm/processor.h>
18#include <asm/pmc.h> 21#include <asm/pmc.h>
19 22
23#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
24static void dummy_perf(struct pt_regs *regs)
25{
26 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
27
28 pmgc0 &= ~PMGC0_PMIE;
29 mtpmr(PMRN_PMGC0, pmgc0);
30}
31#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
32
33#ifndef MMCR0_PMAO
34#define MMCR0_PMAO 0
35#endif
36
20/* Ensure exceptions are disabled */ 37/* Ensure exceptions are disabled */
21static void dummy_perf(struct pt_regs *regs) 38static void dummy_perf(struct pt_regs *regs)
22{ 39{
@@ -25,6 +42,11 @@ static void dummy_perf(struct pt_regs *regs)
25 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO); 42 mmcr0 &= ~(MMCR0_PMXE|MMCR0_PMAO);
26 mtspr(SPRN_MMCR0, mmcr0); 43 mtspr(SPRN_MMCR0, mmcr0);
27} 44}
45#else
46static void dummy_perf(struct pt_regs *regs)
47{
48}
49#endif
28 50
29static DEFINE_SPINLOCK(pmc_owner_lock); 51static DEFINE_SPINLOCK(pmc_owner_lock);
30static void *pmc_owner_caller; /* mostly for debugging */ 52static void *pmc_owner_caller; /* mostly for debugging */
@@ -66,11 +88,12 @@ void release_pmc_hardware(void)
66} 88}
67EXPORT_SYMBOL_GPL(release_pmc_hardware); 89EXPORT_SYMBOL_GPL(release_pmc_hardware);
68 90
91#ifdef CONFIG_PPC64
69void power4_enable_pmcs(void) 92void power4_enable_pmcs(void)
70{ 93{
71 unsigned long hid0; 94 unsigned long hid0;
72 95
73 hid0 = mfspr(HID0); 96 hid0 = mfspr(SPRN_HID0);
74 hid0 |= 1UL << (63 - 20); 97 hid0 |= 1UL << (63 - 20);
75 98
76 /* POWER4 requires the following sequence */ 99 /* POWER4 requires the following sequence */
@@ -83,6 +106,7 @@ void power4_enable_pmcs(void)
83 "mfspr %0, %1\n" 106 "mfspr %0, %1\n"
84 "mfspr %0, %1\n" 107 "mfspr %0, %1\n"
85 "mfspr %0, %1\n" 108 "mfspr %0, %1\n"
86 "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0): 109 "isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
87 "memory"); 110 "memory");
88} 111}
112#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
new file mode 100644
index 000000000000..8bc540337ba0
--- /dev/null
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -0,0 +1,273 @@
1#include <linux/config.h>
2#include <linux/module.h>
3#include <linux/threads.h>
4#include <linux/smp.h>
5#include <linux/sched.h>
6#include <linux/elfcore.h>
7#include <linux/string.h>
8#include <linux/interrupt.h>
9#include <linux/tty.h>
10#include <linux/vt_kern.h>
11#include <linux/nvram.h>
12#include <linux/console.h>
13#include <linux/irq.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/ide.h>
17#include <linux/bitops.h>
18
19#include <asm/page.h>
20#include <asm/semaphore.h>
21#include <asm/processor.h>
22#include <asm/uaccess.h>
23#include <asm/io.h>
24#include <asm/ide.h>
25#include <asm/atomic.h>
26#include <asm/checksum.h>
27#include <asm/pgtable.h>
28#include <asm/tlbflush.h>
29#include <linux/adb.h>
30#include <linux/cuda.h>
31#include <linux/pmu.h>
32#include <asm/prom.h>
33#include <asm/system.h>
34#include <asm/pci-bridge.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/dma.h>
38#include <asm/machdep.h>
39#include <asm/hw_irq.h>
40#include <asm/nvram.h>
41#include <asm/mmu_context.h>
42#include <asm/backlight.h>
43#include <asm/time.h>
44#include <asm/cputable.h>
45#include <asm/btext.h>
46#include <asm/div64.h>
47
48#ifdef CONFIG_8xx
49#include <asm/commproc.h>
50#endif
51
52#ifdef CONFIG_PPC32
53extern void transfer_to_handler(void);
54extern void do_IRQ(struct pt_regs *regs);
55extern void machine_check_exception(struct pt_regs *regs);
56extern void alignment_exception(struct pt_regs *regs);
57extern void program_check_exception(struct pt_regs *regs);
58extern void single_step_exception(struct pt_regs *regs);
59extern int do_signal(sigset_t *, struct pt_regs *);
60extern int pmac_newworld;
61extern int sys_sigreturn(struct pt_regs *regs);
62
63EXPORT_SYMBOL(clear_pages);
64EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
65EXPORT_SYMBOL(DMA_MODE_READ);
66EXPORT_SYMBOL(DMA_MODE_WRITE);
67EXPORT_SYMBOL(__div64_32);
68
69EXPORT_SYMBOL(do_signal);
70EXPORT_SYMBOL(transfer_to_handler);
71EXPORT_SYMBOL(do_IRQ);
72EXPORT_SYMBOL(machine_check_exception);
73EXPORT_SYMBOL(alignment_exception);
74EXPORT_SYMBOL(program_check_exception);
75EXPORT_SYMBOL(single_step_exception);
76EXPORT_SYMBOL(sys_sigreturn);
77#endif
78
79#if defined(CONFIG_PPC_PREP)
80EXPORT_SYMBOL(_prep_type);
81EXPORT_SYMBOL(ucSystemType);
82#endif
83
84#if !defined(__INLINE_BITOPS)
85EXPORT_SYMBOL(set_bit);
86EXPORT_SYMBOL(clear_bit);
87EXPORT_SYMBOL(change_bit);
88EXPORT_SYMBOL(test_and_set_bit);
89EXPORT_SYMBOL(test_and_clear_bit);
90EXPORT_SYMBOL(test_and_change_bit);
91#endif /* __INLINE_BITOPS */
92
93EXPORT_SYMBOL(strcpy);
94EXPORT_SYMBOL(strncpy);
95EXPORT_SYMBOL(strcat);
96EXPORT_SYMBOL(strncat);
97EXPORT_SYMBOL(strchr);
98EXPORT_SYMBOL(strrchr);
99EXPORT_SYMBOL(strpbrk);
100EXPORT_SYMBOL(strstr);
101EXPORT_SYMBOL(strlen);
102EXPORT_SYMBOL(strnlen);
103EXPORT_SYMBOL(strcmp);
104EXPORT_SYMBOL(strncmp);
105EXPORT_SYMBOL(strcasecmp);
106
107EXPORT_SYMBOL(csum_partial);
108EXPORT_SYMBOL(csum_partial_copy_generic);
109EXPORT_SYMBOL(ip_fast_csum);
110EXPORT_SYMBOL(csum_tcpudp_magic);
111
112EXPORT_SYMBOL(__copy_tofrom_user);
113EXPORT_SYMBOL(__clear_user);
114EXPORT_SYMBOL(__strncpy_from_user);
115EXPORT_SYMBOL(__strnlen_user);
116
117EXPORT_SYMBOL(_insb);
118EXPORT_SYMBOL(_outsb);
119EXPORT_SYMBOL(_insw);
120EXPORT_SYMBOL(_outsw);
121EXPORT_SYMBOL(_insl);
122EXPORT_SYMBOL(_outsl);
123EXPORT_SYMBOL(_insw_ns);
124EXPORT_SYMBOL(_outsw_ns);
125EXPORT_SYMBOL(_insl_ns);
126EXPORT_SYMBOL(_outsl_ns);
127EXPORT_SYMBOL(ioremap);
128#ifdef CONFIG_44x
129EXPORT_SYMBOL(ioremap64);
130#endif
131EXPORT_SYMBOL(__ioremap);
132EXPORT_SYMBOL(iounmap);
133#ifdef CONFIG_PPC32
134EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
135#endif
136
137#if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE))
138EXPORT_SYMBOL(ppc_ide_md);
139#endif
140
141#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
142EXPORT_SYMBOL(isa_io_base);
143EXPORT_SYMBOL(isa_mem_base);
144EXPORT_SYMBOL(pci_dram_offset);
145EXPORT_SYMBOL(pci_alloc_consistent);
146EXPORT_SYMBOL(pci_free_consistent);
147EXPORT_SYMBOL(pci_bus_io_base);
148EXPORT_SYMBOL(pci_bus_io_base_phys);
149EXPORT_SYMBOL(pci_bus_mem_base_phys);
150EXPORT_SYMBOL(pci_bus_to_hose);
151EXPORT_SYMBOL(pci_resource_to_bus);
152EXPORT_SYMBOL(pci_phys_to_bus);
153EXPORT_SYMBOL(pci_bus_to_phys);
154#endif /* CONFIG_PCI */
155
156#ifdef CONFIG_NOT_COHERENT_CACHE
157EXPORT_SYMBOL(flush_dcache_all);
158#endif
159
160EXPORT_SYMBOL(start_thread);
161EXPORT_SYMBOL(kernel_thread);
162
163EXPORT_SYMBOL(giveup_fpu);
164#ifdef CONFIG_ALTIVEC
165EXPORT_SYMBOL(giveup_altivec);
166#endif /* CONFIG_ALTIVEC */
167#ifdef CONFIG_SPE
168EXPORT_SYMBOL(giveup_spe);
169#endif /* CONFIG_SPE */
170
171#ifdef CONFIG_PPC64
172EXPORT_SYMBOL(__flush_icache_range);
173#else
174EXPORT_SYMBOL(flush_instruction_cache);
175EXPORT_SYMBOL(flush_icache_range);
176EXPORT_SYMBOL(flush_tlb_kernel_range);
177EXPORT_SYMBOL(flush_tlb_page);
178EXPORT_SYMBOL(_tlbie);
179#endif
180EXPORT_SYMBOL(flush_dcache_range);
181
182#ifdef CONFIG_SMP
183EXPORT_SYMBOL(smp_call_function);
184#ifdef CONFIG_PPC32
185EXPORT_SYMBOL(smp_hw_index);
186#endif
187#endif
188
189#ifdef CONFIG_ADB
190EXPORT_SYMBOL(adb_request);
191EXPORT_SYMBOL(adb_register);
192EXPORT_SYMBOL(adb_unregister);
193EXPORT_SYMBOL(adb_poll);
194EXPORT_SYMBOL(adb_try_handler_change);
195#endif /* CONFIG_ADB */
196#ifdef CONFIG_ADB_CUDA
197EXPORT_SYMBOL(cuda_request);
198EXPORT_SYMBOL(cuda_poll);
199#endif /* CONFIG_ADB_CUDA */
200#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
201EXPORT_SYMBOL(_machine);
202#endif
203#ifdef CONFIG_PPC_PMAC
204EXPORT_SYMBOL(sys_ctrler);
205#endif
206#ifdef CONFIG_VT
207EXPORT_SYMBOL(kd_mksound);
208#endif
209EXPORT_SYMBOL(to_tm);
210
211#ifdef CONFIG_PPC32
212long long __ashrdi3(long long, int);
213long long __ashldi3(long long, int);
214long long __lshrdi3(long long, int);
215EXPORT_SYMBOL(__ashrdi3);
216EXPORT_SYMBOL(__ashldi3);
217EXPORT_SYMBOL(__lshrdi3);
218#endif
219
220EXPORT_SYMBOL(memcpy);
221EXPORT_SYMBOL(memset);
222EXPORT_SYMBOL(memmove);
223EXPORT_SYMBOL(memscan);
224EXPORT_SYMBOL(memcmp);
225EXPORT_SYMBOL(memchr);
226
227#if defined(CONFIG_FB_VGA16_MODULE)
228EXPORT_SYMBOL(screen_info);
229#endif
230
231#ifdef CONFIG_PPC32
232EXPORT_SYMBOL(__delay);
233EXPORT_SYMBOL(timer_interrupt);
234EXPORT_SYMBOL(irq_desc);
235EXPORT_SYMBOL(tb_ticks_per_jiffy);
236EXPORT_SYMBOL(console_drivers);
237EXPORT_SYMBOL(cacheable_memcpy);
238#endif
239
240EXPORT_SYMBOL(__up);
241EXPORT_SYMBOL(__down);
242EXPORT_SYMBOL(__down_interruptible);
243
244#ifdef CONFIG_8xx
245EXPORT_SYMBOL(cpm_install_handler);
246EXPORT_SYMBOL(cpm_free_handler);
247#endif /* CONFIG_8xx */
248#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
249 defined(CONFIG_83xx)
250EXPORT_SYMBOL(__res);
251#endif
252
253#ifdef CONFIG_PPC32
254EXPORT_SYMBOL(next_mmu_context);
255EXPORT_SYMBOL(set_context);
256#endif
257
258#ifdef CONFIG_PPC_STD_MMU_32
259extern long mol_trampoline;
260EXPORT_SYMBOL(mol_trampoline); /* For MOL */
261EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
262EXPORT_SYMBOL_GPL(__handle_mm_fault); /* For MOL */
263#ifdef CONFIG_SMP
264extern int mmu_hash_lock;
265EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
266#endif /* CONFIG_SMP */
267extern long *intercept_table;
268EXPORT_SYMBOL(intercept_table);
269#endif /* CONFIG_PPC_STD_MMU_32 */
270#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
271EXPORT_SYMBOL(__mtdcr);
272EXPORT_SYMBOL(__mfdcr);
273#endif
diff --git a/arch/ppc64/kernel/process.c b/arch/powerpc/kernel/process.c
index 887005358eb1..8f85dabe4df3 100644
--- a/arch/ppc64/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/process.c 2 * arch/ppc/kernel/process.c
3 * 3 *
4 * Derived from "arch/i386/kernel/process.c" 4 * Derived from "arch/i386/kernel/process.c"
5 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 1995 Linus Torvalds
@@ -7,7 +7,7 @@
7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and 7 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
8 * Paul Mackerras (paulus@cs.anu.edu.au) 8 * Paul Mackerras (paulus@cs.anu.edu.au)
9 * 9 *
10 * PowerPC version 10 * PowerPC version
11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 11 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 * 12 *
13 * This program is free software; you can redistribute it and/or 13 * This program is free software; you can redistribute it and/or
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/errno.h> 20#include <linux/errno.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -26,15 +25,17 @@
26#include <linux/smp_lock.h> 25#include <linux/smp_lock.h>
27#include <linux/stddef.h> 26#include <linux/stddef.h>
28#include <linux/unistd.h> 27#include <linux/unistd.h>
28#include <linux/ptrace.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/user.h> 30#include <linux/user.h>
31#include <linux/elf.h> 31#include <linux/elf.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/init_task.h>
34#include <linux/prctl.h> 33#include <linux/prctl.h>
35#include <linux/ptrace.h> 34#include <linux/init_task.h>
35#include <linux/module.h>
36#include <linux/kallsyms.h> 36#include <linux/kallsyms.h>
37#include <linux/interrupt.h> 37#include <linux/mqueue.h>
38#include <linux/hardirq.h>
38#include <linux/utsname.h> 39#include <linux/utsname.h>
39#include <linux/kprobes.h> 40#include <linux/kprobes.h>
40 41
@@ -44,21 +45,19 @@
44#include <asm/io.h> 45#include <asm/io.h>
45#include <asm/processor.h> 46#include <asm/processor.h>
46#include <asm/mmu.h> 47#include <asm/mmu.h>
47#include <asm/mmu_context.h>
48#include <asm/prom.h> 48#include <asm/prom.h>
49#include <asm/ppcdebug.h> 49#ifdef CONFIG_PPC64
50#include <asm/machdep.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/cputable.h>
53#include <asm/firmware.h> 50#include <asm/firmware.h>
54#include <asm/sections.h>
55#include <asm/tlbflush.h>
56#include <asm/time.h>
57#include <asm/plpar_wrappers.h> 51#include <asm/plpar_wrappers.h>
52#include <asm/time.h>
53#endif
54
55extern unsigned long _get_SP(void);
58 56
59#ifndef CONFIG_SMP 57#ifndef CONFIG_SMP
60struct task_struct *last_task_used_math = NULL; 58struct task_struct *last_task_used_math = NULL;
61struct task_struct *last_task_used_altivec = NULL; 59struct task_struct *last_task_used_altivec = NULL;
60struct task_struct *last_task_used_spe = NULL;
62#endif 61#endif
63 62
64/* 63/*
@@ -121,7 +120,6 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
121} 120}
122 121
123#ifdef CONFIG_ALTIVEC 122#ifdef CONFIG_ALTIVEC
124
125void enable_kernel_altivec(void) 123void enable_kernel_altivec(void)
126{ 124{
127 WARN_ON(preemptible()); 125 WARN_ON(preemptible());
@@ -130,7 +128,7 @@ void enable_kernel_altivec(void)
130 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) 128 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
131 giveup_altivec(current); 129 giveup_altivec(current);
132 else 130 else
133 giveup_altivec(NULL); /* just enables FP for kernel */ 131 giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
134#else 132#else
135 giveup_altivec(last_task_used_altivec); 133 giveup_altivec(last_task_used_altivec);
136#endif /* CONFIG_SMP */ 134#endif /* CONFIG_SMP */
@@ -161,9 +159,48 @@ int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 159 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
162 return 1; 160 return 1;
163} 161}
164
165#endif /* CONFIG_ALTIVEC */ 162#endif /* CONFIG_ALTIVEC */
166 163
164#ifdef CONFIG_SPE
165
166void enable_kernel_spe(void)
167{
168 WARN_ON(preemptible());
169
170#ifdef CONFIG_SMP
171 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
172 giveup_spe(current);
173 else
174 giveup_spe(NULL); /* just enable SPE for kernel - force */
175#else
176 giveup_spe(last_task_used_spe);
177#endif /* __SMP __ */
178}
179EXPORT_SYMBOL(enable_kernel_spe);
180
181void flush_spe_to_thread(struct task_struct *tsk)
182{
183 if (tsk->thread.regs) {
184 preempt_disable();
185 if (tsk->thread.regs->msr & MSR_SPE) {
186#ifdef CONFIG_SMP
187 BUG_ON(tsk != current);
188#endif
189 giveup_spe(current);
190 }
191 preempt_enable();
192 }
193}
194
195int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
196{
197 flush_spe_to_thread(current);
198 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
199 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
200 return 1;
201}
202#endif /* CONFIG_SPE */
203
167static void set_dabr_spr(unsigned long val) 204static void set_dabr_spr(unsigned long val)
168{ 205{
169 mtspr(SPRN_DABR, val); 206 mtspr(SPRN_DABR, val);
@@ -173,24 +210,27 @@ int set_dabr(unsigned long dabr)
173{ 210{
174 int ret = 0; 211 int ret = 0;
175 212
213#ifdef CONFIG_PPC64
176 if (firmware_has_feature(FW_FEATURE_XDABR)) { 214 if (firmware_has_feature(FW_FEATURE_XDABR)) {
177 /* We want to catch accesses from kernel and userspace */ 215 /* We want to catch accesses from kernel and userspace */
178 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER; 216 unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
179 ret = plpar_set_xdabr(dabr, flags); 217 ret = plpar_set_xdabr(dabr, flags);
180 } else if (firmware_has_feature(FW_FEATURE_DABR)) { 218 } else if (firmware_has_feature(FW_FEATURE_DABR)) {
181 ret = plpar_set_dabr(dabr); 219 ret = plpar_set_dabr(dabr);
182 } else { 220 } else
221#endif
183 set_dabr_spr(dabr); 222 set_dabr_spr(dabr);
184 }
185 223
186 return ret; 224 return ret;
187} 225}
188 226
227#ifdef CONFIG_PPC64
189DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); 228DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
190static DEFINE_PER_CPU(unsigned long, current_dabr); 229static DEFINE_PER_CPU(unsigned long, current_dabr);
230#endif
191 231
192struct task_struct *__switch_to(struct task_struct *prev, 232struct task_struct *__switch_to(struct task_struct *prev,
193 struct task_struct *new) 233 struct task_struct *new)
194{ 234{
195 struct thread_struct *new_thread, *old_thread; 235 struct thread_struct *new_thread, *old_thread;
196 unsigned long flags; 236 unsigned long flags;
@@ -200,7 +240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
200 /* avoid complexity of lazy save/restore of fpu 240 /* avoid complexity of lazy save/restore of fpu
201 * by just saving it every time we switch out if 241 * by just saving it every time we switch out if
202 * this task used the fpu during the last quantum. 242 * this task used the fpu during the last quantum.
203 * 243 *
204 * If it tries to use the fpu again, it'll trap and 244 * If it tries to use the fpu again, it'll trap and
205 * reload its fp regs. So we don't have to do a restore 245 * reload its fp regs. So we don't have to do a restore
206 * every switch, just a save. 246 * every switch, just a save.
@@ -209,31 +249,65 @@ struct task_struct *__switch_to(struct task_struct *prev,
209 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) 249 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
210 giveup_fpu(prev); 250 giveup_fpu(prev);
211#ifdef CONFIG_ALTIVEC 251#ifdef CONFIG_ALTIVEC
252 /*
253 * If the previous thread used altivec in the last quantum
254 * (thus changing altivec regs) then save them.
255 * We used to check the VRSAVE register but not all apps
256 * set it, so we don't rely on it now (and in fact we need
257 * to save & restore VSCR even if VRSAVE == 0). -- paulus
258 *
259 * On SMP we always save/restore altivec regs just to avoid the
260 * complexity of changing processors.
261 * -- Cort
262 */
212 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) 263 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
213 giveup_altivec(prev); 264 giveup_altivec(prev);
214#endif /* CONFIG_ALTIVEC */ 265#endif /* CONFIG_ALTIVEC */
215#endif /* CONFIG_SMP */ 266#ifdef CONFIG_SPE
267 /*
268 * If the previous thread used spe in the last quantum
269 * (thus changing spe regs) then save them.
270 *
271 * On SMP we always save/restore spe regs just to avoid the
272 * complexity of changing processors.
273 */
274 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
275 giveup_spe(prev);
276#endif /* CONFIG_SPE */
216 277
217#if defined(CONFIG_ALTIVEC) && !defined(CONFIG_SMP) 278#else /* CONFIG_SMP */
279#ifdef CONFIG_ALTIVEC
218 /* Avoid the trap. On smp this this never happens since 280 /* Avoid the trap. On smp this this never happens since
219 * we don't set last_task_used_altivec -- Cort 281 * we don't set last_task_used_altivec -- Cort
220 */ 282 */
221 if (new->thread.regs && last_task_used_altivec == new) 283 if (new->thread.regs && last_task_used_altivec == new)
222 new->thread.regs->msr |= MSR_VEC; 284 new->thread.regs->msr |= MSR_VEC;
223#endif /* CONFIG_ALTIVEC */ 285#endif /* CONFIG_ALTIVEC */
286#ifdef CONFIG_SPE
287 /* Avoid the trap. On smp this this never happens since
288 * we don't set last_task_used_spe
289 */
290 if (new->thread.regs && last_task_used_spe == new)
291 new->thread.regs->msr |= MSR_SPE;
292#endif /* CONFIG_SPE */
224 293
294#endif /* CONFIG_SMP */
295
296#ifdef CONFIG_PPC64 /* for now */
225 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) { 297 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) {
226 set_dabr(new->thread.dabr); 298 set_dabr(new->thread.dabr);
227 __get_cpu_var(current_dabr) = new->thread.dabr; 299 __get_cpu_var(current_dabr) = new->thread.dabr;
228 } 300 }
229 301
230 flush_tlb_pending(); 302 flush_tlb_pending();
303#endif
231 304
232 new_thread = &new->thread; 305 new_thread = &new->thread;
233 old_thread = &current->thread; 306 old_thread = &current->thread;
234 307
235 /* Collect purr utilization data per process and per processor 308#ifdef CONFIG_PPC64
236 * wise purr is nothing but processor time base 309 /*
310 * Collect processor utilization data per process
237 */ 311 */
238 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 312 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
239 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 313 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
@@ -243,6 +317,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
243 old_thread->accum_tb += (current_tb - start_tb); 317 old_thread->accum_tb += (current_tb - start_tb);
244 new_thread->start_tb = current_tb; 318 new_thread->start_tb = current_tb;
245 } 319 }
320#endif
246 321
247 local_irq_save(flags); 322 local_irq_save(flags);
248 last = _switch(old_thread, new_thread); 323 last = _switch(old_thread, new_thread);
@@ -254,6 +329,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
254 329
255static int instructions_to_print = 16; 330static int instructions_to_print = 16;
256 331
332#ifdef CONFIG_PPC64
333#define BAD_PC(pc) ((REGION_ID(pc) != KERNEL_REGION_ID) && \
334 (REGION_ID(pc) != VMALLOC_REGION_ID))
335#else
336#define BAD_PC(pc) ((pc) < KERNELBASE)
337#endif
338
257static void show_instructions(struct pt_regs *regs) 339static void show_instructions(struct pt_regs *regs)
258{ 340{
259 int i; 341 int i;
@@ -268,9 +350,7 @@ static void show_instructions(struct pt_regs *regs)
268 if (!(i % 8)) 350 if (!(i % 8))
269 printk("\n"); 351 printk("\n");
270 352
271 if (((REGION_ID(pc) != KERNEL_REGION_ID) && 353 if (BAD_PC(pc) || __get_user(instr, (unsigned int *)pc)) {
272 (REGION_ID(pc) != VMALLOC_REGION_ID)) ||
273 __get_user(instr, (unsigned int *)pc)) {
274 printk("XXXXXXXX "); 354 printk("XXXXXXXX ");
275 } else { 355 } else {
276 if (regs->nip == pc) 356 if (regs->nip == pc)
@@ -285,50 +365,82 @@ static void show_instructions(struct pt_regs *regs)
285 printk("\n"); 365 printk("\n");
286} 366}
287 367
368static struct regbit {
369 unsigned long bit;
370 const char *name;
371} msr_bits[] = {
372 {MSR_EE, "EE"},
373 {MSR_PR, "PR"},
374 {MSR_FP, "FP"},
375 {MSR_ME, "ME"},
376 {MSR_IR, "IR"},
377 {MSR_DR, "DR"},
378 {0, NULL}
379};
380
381static void printbits(unsigned long val, struct regbit *bits)
382{
383 const char *sep = "";
384
385 printk("<");
386 for (; bits->bit; ++bits)
387 if (val & bits->bit) {
388 printk("%s%s", sep, bits->name);
389 sep = ",";
390 }
391 printk(">");
392}
393
394#ifdef CONFIG_PPC64
395#define REG "%016lX"
396#define REGS_PER_LINE 4
397#define LAST_VOLATILE 13
398#else
399#define REG "%08lX"
400#define REGS_PER_LINE 8
401#define LAST_VOLATILE 12
402#endif
403
288void show_regs(struct pt_regs * regs) 404void show_regs(struct pt_regs * regs)
289{ 405{
290 int i; 406 int i, trap;
291 unsigned long trap;
292 407
293 printk("NIP: %016lX XER: %08X LR: %016lX CTR: %016lX\n", 408 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
294 regs->nip, (unsigned int)regs->xer, regs->link, regs->ctr); 409 regs->nip, regs->link, regs->ctr);
295 printk("REGS: %p TRAP: %04lx %s (%s)\n", 410 printk("REGS: %p TRAP: %04lx %s (%s)\n",
296 regs, regs->trap, print_tainted(), system_utsname.release); 411 regs, regs->trap, print_tainted(), system_utsname.release);
297 printk("MSR: %016lx EE: %01x PR: %01x FP: %01x ME: %01x " 412 printk("MSR: "REG" ", regs->msr);
298 "IR/DR: %01x%01x CR: %08X\n", 413 printbits(regs->msr, msr_bits);
299 regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0, 414 printk(" CR: %08lX XER: %08lX\n", regs->ccr, regs->xer);
300 regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
301 regs->msr&MSR_IR ? 1 : 0,
302 regs->msr&MSR_DR ? 1 : 0,
303 (unsigned int)regs->ccr);
304 trap = TRAP(regs); 415 trap = TRAP(regs);
305 printk("DAR: %016lx DSISR: %016lx\n", regs->dar, regs->dsisr); 416 if (trap == 0x300 || trap == 0x600)
306 printk("TASK: %p[%d] '%s' THREAD: %p", 417 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
418 printk("TASK = %p[%d] '%s' THREAD: %p",
307 current, current->pid, current->comm, current->thread_info); 419 current, current->pid, current->comm, current->thread_info);
308 420
309#ifdef CONFIG_SMP 421#ifdef CONFIG_SMP
310 printk(" CPU: %d", smp_processor_id()); 422 printk(" CPU: %d", smp_processor_id());
311#endif /* CONFIG_SMP */ 423#endif /* CONFIG_SMP */
312 424
313 for (i = 0; i < 32; i++) { 425 for (i = 0; i < 32; i++) {
314 if ((i % 4) == 0) { 426 if ((i % REGS_PER_LINE) == 0)
315 printk("\n" KERN_INFO "GPR%02d: ", i); 427 printk("\n" KERN_INFO "GPR%02d: ", i);
316 } 428 printk(REG " ", regs->gpr[i]);
317 429 if (i == LAST_VOLATILE && !FULL_REGS(regs))
318 printk("%016lX ", regs->gpr[i]);
319 if (i == 13 && !FULL_REGS(regs))
320 break; 430 break;
321 } 431 }
322 printk("\n"); 432 printk("\n");
433#ifdef CONFIG_KALLSYMS
323 /* 434 /*
324 * Lookup NIP late so we have the best change of getting the 435 * Lookup NIP late so we have the best change of getting the
325 * above info out without failing 436 * above info out without failing
326 */ 437 */
327 printk("NIP [%016lx] ", regs->nip); 438 printk("NIP ["REG"] ", regs->nip);
328 print_symbol("%s\n", regs->nip); 439 print_symbol("%s\n", regs->nip);
329 printk("LR [%016lx] ", regs->link); 440 printk("LR ["REG"] ", regs->link);
330 print_symbol("%s\n", regs->link); 441 print_symbol("%s\n", regs->link);
331 show_stack(current, (unsigned long *)regs->gpr[1]); 442#endif
443 show_stack(current, (unsigned long *) regs->gpr[1]);
332 if (!user_mode(regs)) 444 if (!user_mode(regs))
333 show_instructions(regs); 445 show_instructions(regs);
334} 446}
@@ -344,16 +456,22 @@ void exit_thread(void)
344 if (last_task_used_altivec == current) 456 if (last_task_used_altivec == current)
345 last_task_used_altivec = NULL; 457 last_task_used_altivec = NULL;
346#endif /* CONFIG_ALTIVEC */ 458#endif /* CONFIG_ALTIVEC */
459#ifdef CONFIG_SPE
460 if (last_task_used_spe == current)
461 last_task_used_spe = NULL;
462#endif
347#endif /* CONFIG_SMP */ 463#endif /* CONFIG_SMP */
348} 464}
349 465
350void flush_thread(void) 466void flush_thread(void)
351{ 467{
468#ifdef CONFIG_PPC64
352 struct thread_info *t = current_thread_info(); 469 struct thread_info *t = current_thread_info();
353 470
354 kprobe_flush_task(current);
355 if (t->flags & _TIF_ABI_PENDING) 471 if (t->flags & _TIF_ABI_PENDING)
356 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); 472 t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
473#endif
474 kprobe_flush_task(current);
357 475
358#ifndef CONFIG_SMP 476#ifndef CONFIG_SMP
359 if (last_task_used_math == current) 477 if (last_task_used_math == current)
@@ -362,12 +480,18 @@ void flush_thread(void)
362 if (last_task_used_altivec == current) 480 if (last_task_used_altivec == current)
363 last_task_used_altivec = NULL; 481 last_task_used_altivec = NULL;
364#endif /* CONFIG_ALTIVEC */ 482#endif /* CONFIG_ALTIVEC */
483#ifdef CONFIG_SPE
484 if (last_task_used_spe == current)
485 last_task_used_spe = NULL;
486#endif
365#endif /* CONFIG_SMP */ 487#endif /* CONFIG_SMP */
366 488
489#ifdef CONFIG_PPC64 /* for now */
367 if (current->thread.dabr) { 490 if (current->thread.dabr) {
368 current->thread.dabr = 0; 491 current->thread.dabr = 0;
369 set_dabr(0); 492 set_dabr(0);
370 } 493 }
494#endif
371} 495}
372 496
373void 497void
@@ -375,7 +499,6 @@ release_thread(struct task_struct *t)
375{ 499{
376} 500}
377 501
378
379/* 502/*
380 * This gets called before we allocate a new thread and copy 503 * This gets called before we allocate a new thread and copy
381 * the current task into it. 504 * the current task into it.
@@ -384,36 +507,44 @@ void prepare_to_copy(struct task_struct *tsk)
384{ 507{
385 flush_fp_to_thread(current); 508 flush_fp_to_thread(current);
386 flush_altivec_to_thread(current); 509 flush_altivec_to_thread(current);
510 flush_spe_to_thread(current);
387} 511}
388 512
389/* 513/*
390 * Copy a thread.. 514 * Copy a thread..
391 */ 515 */
392int 516int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
393copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 517 unsigned long unused, struct task_struct *p,
394 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 518 struct pt_regs *regs)
395{ 519{
396 struct pt_regs *childregs, *kregs; 520 struct pt_regs *childregs, *kregs;
397 extern void ret_from_fork(void); 521 extern void ret_from_fork(void);
398 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE; 522 unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
399 523
524 CHECK_FULL_REGS(regs);
400 /* Copy registers */ 525 /* Copy registers */
401 sp -= sizeof(struct pt_regs); 526 sp -= sizeof(struct pt_regs);
402 childregs = (struct pt_regs *) sp; 527 childregs = (struct pt_regs *) sp;
403 *childregs = *regs; 528 *childregs = *regs;
404 if ((childregs->msr & MSR_PR) == 0) { 529 if ((childregs->msr & MSR_PR) == 0) {
405 /* for kernel thread, set stackptr in new task */ 530 /* for kernel thread, set `current' and stackptr in new task */
406 childregs->gpr[1] = sp + sizeof(struct pt_regs); 531 childregs->gpr[1] = sp + sizeof(struct pt_regs);
407 p->thread.regs = NULL; /* no user register state */ 532#ifdef CONFIG_PPC32
533 childregs->gpr[2] = (unsigned long) p;
534#else
408 clear_ti_thread_flag(p->thread_info, TIF_32BIT); 535 clear_ti_thread_flag(p->thread_info, TIF_32BIT);
536#endif
537 p->thread.regs = NULL; /* no user register state */
409 } else { 538 } else {
410 childregs->gpr[1] = usp; 539 childregs->gpr[1] = usp;
411 p->thread.regs = childregs; 540 p->thread.regs = childregs;
412 if (clone_flags & CLONE_SETTLS) { 541 if (clone_flags & CLONE_SETTLS) {
413 if (test_thread_flag(TIF_32BIT)) 542#ifdef CONFIG_PPC64
414 childregs->gpr[2] = childregs->gpr[6]; 543 if (!test_thread_flag(TIF_32BIT))
415 else
416 childregs->gpr[13] = childregs->gpr[6]; 544 childregs->gpr[13] = childregs->gpr[6];
545 else
546#endif
547 childregs->gpr[2] = childregs->gpr[6];
417 } 548 }
418 } 549 }
419 childregs->gpr[3] = 0; /* Result from fork() */ 550 childregs->gpr[3] = 0; /* Result from fork() */
@@ -431,6 +562,8 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
431 kregs = (struct pt_regs *) sp; 562 kregs = (struct pt_regs *) sp;
432 sp -= STACK_FRAME_OVERHEAD; 563 sp -= STACK_FRAME_OVERHEAD;
433 p->thread.ksp = sp; 564 p->thread.ksp = sp;
565
566#ifdef CONFIG_PPC64
434 if (cpu_has_feature(CPU_FTR_SLB)) { 567 if (cpu_has_feature(CPU_FTR_SLB)) {
435 unsigned long sp_vsid = get_kernel_vsid(sp); 568 unsigned long sp_vsid = get_kernel_vsid(sp);
436 569
@@ -449,6 +582,10 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
449 * function. 582 * function.
450 */ 583 */
451 kregs->nip = *((unsigned long *)ret_from_fork); 584 kregs->nip = *((unsigned long *)ret_from_fork);
585#else
586 kregs->nip = (unsigned long)ret_from_fork;
587 p->thread.last_syscall = -1;
588#endif
452 589
453 return 0; 590 return 0;
454} 591}
@@ -456,30 +593,17 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
456/* 593/*
457 * Set up a thread for executing a new program 594 * Set up a thread for executing a new program
458 */ 595 */
459void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp) 596void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
460{ 597{
461 unsigned long entry, toc, load_addr = regs->gpr[2]; 598#ifdef CONFIG_PPC64
599 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
600#endif
462 601
463 /* fdptr is a relocated pointer to the function descriptor for
464 * the elf _start routine. The first entry in the function
465 * descriptor is the entry address of _start and the second
466 * entry is the TOC value we need to use.
467 */
468 set_fs(USER_DS); 602 set_fs(USER_DS);
469 __get_user(entry, (unsigned long __user *)fdptr);
470 __get_user(toc, (unsigned long __user *)fdptr+1);
471
472 /* Check whether the e_entry function descriptor entries
473 * need to be relocated before we can use them.
474 */
475 if (load_addr != 0) {
476 entry += load_addr;
477 toc += load_addr;
478 }
479 603
480 /* 604 /*
481 * If we exec out of a kernel thread then thread.regs will not be 605 * If we exec out of a kernel thread then thread.regs will not be
482 * set. Do it now. 606 * set. Do it now.
483 */ 607 */
484 if (!current->thread.regs) { 608 if (!current->thread.regs) {
485 unsigned long childregs = (unsigned long)current->thread_info + 609 unsigned long childregs = (unsigned long)current->thread_info +
@@ -488,36 +612,101 @@ void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp)
488 current->thread.regs = (struct pt_regs *)childregs; 612 current->thread.regs = (struct pt_regs *)childregs;
489 } 613 }
490 614
491 regs->nip = entry; 615 memset(regs->gpr, 0, sizeof(regs->gpr));
616 regs->ctr = 0;
617 regs->link = 0;
618 regs->xer = 0;
619 regs->ccr = 0;
492 regs->gpr[1] = sp; 620 regs->gpr[1] = sp;
493 regs->gpr[2] = toc; 621
494 regs->msr = MSR_USER64; 622#ifdef CONFIG_PPC32
623 regs->mq = 0;
624 regs->nip = start;
625 regs->msr = MSR_USER;
626#else
627 if (!test_thread_flag(TIF_32BIT)) {
628 unsigned long entry, toc;
629
630 /* start is a relocated pointer to the function descriptor for
631 * the elf _start routine. The first entry in the function
632 * descriptor is the entry address of _start and the second
633 * entry is the TOC value we need to use.
634 */
635 __get_user(entry, (unsigned long __user *)start);
636 __get_user(toc, (unsigned long __user *)start+1);
637
638 /* Check whether the e_entry function descriptor entries
639 * need to be relocated before we can use them.
640 */
641 if (load_addr != 0) {
642 entry += load_addr;
643 toc += load_addr;
644 }
645 regs->nip = entry;
646 regs->gpr[2] = toc;
647 regs->msr = MSR_USER64;
648 } else {
649 regs->nip = start;
650 regs->gpr[2] = 0;
651 regs->msr = MSR_USER32;
652 }
653#endif
654
495#ifndef CONFIG_SMP 655#ifndef CONFIG_SMP
496 if (last_task_used_math == current) 656 if (last_task_used_math == current)
497 last_task_used_math = 0; 657 last_task_used_math = NULL;
498#endif /* CONFIG_SMP */
499 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
500 current->thread.fpscr = 0;
501#ifdef CONFIG_ALTIVEC 658#ifdef CONFIG_ALTIVEC
502#ifndef CONFIG_SMP
503 if (last_task_used_altivec == current) 659 if (last_task_used_altivec == current)
504 last_task_used_altivec = 0; 660 last_task_used_altivec = NULL;
661#endif
662#ifdef CONFIG_SPE
663 if (last_task_used_spe == current)
664 last_task_used_spe = NULL;
665#endif
505#endif /* CONFIG_SMP */ 666#endif /* CONFIG_SMP */
667 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
668 current->thread.fpscr.val = 0;
669#ifdef CONFIG_ALTIVEC
506 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 670 memset(current->thread.vr, 0, sizeof(current->thread.vr));
507 current->thread.vscr.u[0] = 0; 671 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
508 current->thread.vscr.u[1] = 0;
509 current->thread.vscr.u[2] = 0;
510 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ 672 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
511 current->thread.vrsave = 0; 673 current->thread.vrsave = 0;
512 current->thread.used_vr = 0; 674 current->thread.used_vr = 0;
513#endif /* CONFIG_ALTIVEC */ 675#endif /* CONFIG_ALTIVEC */
676#ifdef CONFIG_SPE
677 memset(current->thread.evr, 0, sizeof(current->thread.evr));
678 current->thread.acc = 0;
679 current->thread.spefscr = 0;
680 current->thread.used_spe = 0;
681#endif /* CONFIG_SPE */
514} 682}
515EXPORT_SYMBOL(start_thread); 683
684#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
685 | PR_FP_EXC_RES | PR_FP_EXC_INV)
516 686
517int set_fpexc_mode(struct task_struct *tsk, unsigned int val) 687int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
518{ 688{
519 struct pt_regs *regs = tsk->thread.regs; 689 struct pt_regs *regs = tsk->thread.regs;
520 690
691 /* This is a bit hairy. If we are an SPE enabled processor
692 * (have embedded fp) we store the IEEE exception enable flags in
693 * fpexc_mode. fpexc_mode is also used for setting FP exception
694 * mode (asyn, precise, disabled) for 'Classic' FP. */
695 if (val & PR_FP_EXC_SW_ENABLE) {
696#ifdef CONFIG_SPE
697 tsk->thread.fpexc_mode = val &
698 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
699 return 0;
700#else
701 return -EINVAL;
702#endif
703 }
704
705 /* on a CONFIG_SPE this does not hurt us. The bits that
706 * __pack_fe01 use do not overlap with bits used for
707 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
708 * on CONFIG_SPE implementations are reserved so writing to
709 * them does not change anything */
521 if (val > PR_FP_EXC_PRECISE) 710 if (val > PR_FP_EXC_PRECISE)
522 return -EINVAL; 711 return -EINVAL;
523 tsk->thread.fpexc_mode = __pack_fe01(val); 712 tsk->thread.fpexc_mode = __pack_fe01(val);
@@ -531,38 +720,41 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
531{ 720{
532 unsigned int val; 721 unsigned int val;
533 722
534 val = __unpack_fe01(tsk->thread.fpexc_mode); 723 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
724#ifdef CONFIG_SPE
725 val = tsk->thread.fpexc_mode;
726#else
727 return -EINVAL;
728#endif
729 else
730 val = __unpack_fe01(tsk->thread.fpexc_mode);
535 return put_user(val, (unsigned int __user *) adr); 731 return put_user(val, (unsigned int __user *) adr);
536} 732}
537 733
538int sys_clone(unsigned long clone_flags, unsigned long p2, unsigned long p3, 734#define TRUNC_PTR(x) ((typeof(x))(((unsigned long)(x)) & 0xffffffff))
539 unsigned long p4, unsigned long p5, unsigned long p6, 735
736int sys_clone(unsigned long clone_flags, unsigned long usp,
737 int __user *parent_tidp, void __user *child_threadptr,
738 int __user *child_tidp, int p6,
540 struct pt_regs *regs) 739 struct pt_regs *regs)
541{ 740{
542 unsigned long parent_tidptr = 0; 741 CHECK_FULL_REGS(regs);
543 unsigned long child_tidptr = 0; 742 if (usp == 0)
544 743 usp = regs->gpr[1]; /* stack pointer for child */
545 if (p2 == 0) 744#ifdef CONFIG_PPC64
546 p2 = regs->gpr[1]; /* stack pointer for child */ 745 if (test_thread_flag(TIF_32BIT)) {
547 746 parent_tidp = TRUNC_PTR(parent_tidp);
548 if (clone_flags & (CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | 747 child_tidp = TRUNC_PTR(child_tidp);
549 CLONE_CHILD_CLEARTID)) {
550 parent_tidptr = p3;
551 child_tidptr = p5;
552 if (test_thread_flag(TIF_32BIT)) {
553 parent_tidptr &= 0xffffffff;
554 child_tidptr &= 0xffffffff;
555 }
556 } 748 }
557 749#endif
558 return do_fork(clone_flags, p2, regs, 0, 750 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
559 (int __user *)parent_tidptr, (int __user *)child_tidptr);
560} 751}
561 752
562int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, 753int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
563 unsigned long p4, unsigned long p5, unsigned long p6, 754 unsigned long p4, unsigned long p5, unsigned long p6,
564 struct pt_regs *regs) 755 struct pt_regs *regs)
565{ 756{
757 CHECK_FULL_REGS(regs);
566 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 758 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
567} 759}
568 760
@@ -570,8 +762,9 @@ int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
570 unsigned long p4, unsigned long p5, unsigned long p6, 762 unsigned long p4, unsigned long p5, unsigned long p6,
571 struct pt_regs *regs) 763 struct pt_regs *regs)
572{ 764{
573 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0, 765 CHECK_FULL_REGS(regs);
574 NULL, NULL); 766 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
767 regs, 0, NULL, NULL);
575} 768}
576 769
577int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, 770int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -579,30 +772,27 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
579 struct pt_regs *regs) 772 struct pt_regs *regs)
580{ 773{
581 int error; 774 int error;
582 char * filename; 775 char *filename;
583 776
584 filename = getname((char __user *) a0); 777 filename = getname((char __user *) a0);
585 error = PTR_ERR(filename); 778 error = PTR_ERR(filename);
586 if (IS_ERR(filename)) 779 if (IS_ERR(filename))
587 goto out; 780 goto out;
588 flush_fp_to_thread(current); 781 flush_fp_to_thread(current);
589 flush_altivec_to_thread(current); 782 flush_altivec_to_thread(current);
783 flush_spe_to_thread(current);
590 error = do_execve(filename, (char __user * __user *) a1, 784 error = do_execve(filename, (char __user * __user *) a1,
591 (char __user * __user *) a2, regs); 785 (char __user * __user *) a2, regs);
592
593 if (error == 0) { 786 if (error == 0) {
594 task_lock(current); 787 task_lock(current);
595 current->ptrace &= ~PT_DTRACE; 788 current->ptrace &= ~PT_DTRACE;
596 task_unlock(current); 789 task_unlock(current);
597 } 790 }
598 putname(filename); 791 putname(filename);
599
600out: 792out:
601 return error; 793 return error;
602} 794}
603 795
604static int kstack_depth_to_print = 64;
605
606static int validate_sp(unsigned long sp, struct task_struct *p, 796static int validate_sp(unsigned long sp, struct task_struct *p,
607 unsigned long nbytes) 797 unsigned long nbytes)
608{ 798{
@@ -627,6 +817,20 @@ static int validate_sp(unsigned long sp, struct task_struct *p,
627 return 0; 817 return 0;
628} 818}
629 819
820#ifdef CONFIG_PPC64
821#define MIN_STACK_FRAME 112 /* same as STACK_FRAME_OVERHEAD, in fact */
822#define FRAME_LR_SAVE 2
823#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD + 288)
824#define REGS_MARKER 0x7265677368657265ul
825#define FRAME_MARKER 12
826#else
827#define MIN_STACK_FRAME 16
828#define FRAME_LR_SAVE 1
829#define INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
830#define REGS_MARKER 0x72656773ul
831#define FRAME_MARKER 2
832#endif
833
630unsigned long get_wchan(struct task_struct *p) 834unsigned long get_wchan(struct task_struct *p)
631{ 835{
632 unsigned long ip, sp; 836 unsigned long ip, sp;
@@ -636,15 +840,15 @@ unsigned long get_wchan(struct task_struct *p)
636 return 0; 840 return 0;
637 841
638 sp = p->thread.ksp; 842 sp = p->thread.ksp;
639 if (!validate_sp(sp, p, 112)) 843 if (!validate_sp(sp, p, MIN_STACK_FRAME))
640 return 0; 844 return 0;
641 845
642 do { 846 do {
643 sp = *(unsigned long *)sp; 847 sp = *(unsigned long *)sp;
644 if (!validate_sp(sp, p, 112)) 848 if (!validate_sp(sp, p, MIN_STACK_FRAME))
645 return 0; 849 return 0;
646 if (count > 0) { 850 if (count > 0) {
647 ip = *(unsigned long *)(sp + 16); 851 ip = ((unsigned long *)sp)[FRAME_LR_SAVE];
648 if (!in_sched_functions(ip)) 852 if (!in_sched_functions(ip))
649 return ip; 853 return ip;
650 } 854 }
@@ -653,33 +857,35 @@ unsigned long get_wchan(struct task_struct *p)
653} 857}
654EXPORT_SYMBOL(get_wchan); 858EXPORT_SYMBOL(get_wchan);
655 859
656void show_stack(struct task_struct *p, unsigned long *_sp) 860static int kstack_depth_to_print = 64;
861
862void show_stack(struct task_struct *tsk, unsigned long *stack)
657{ 863{
658 unsigned long ip, newsp, lr; 864 unsigned long sp, ip, lr, newsp;
659 int count = 0; 865 int count = 0;
660 unsigned long sp = (unsigned long)_sp;
661 int firstframe = 1; 866 int firstframe = 1;
662 867
868 sp = (unsigned long) stack;
869 if (tsk == NULL)
870 tsk = current;
663 if (sp == 0) { 871 if (sp == 0) {
664 if (p) { 872 if (tsk == current)
665 sp = p->thread.ksp; 873 asm("mr %0,1" : "=r" (sp));
666 } else { 874 else
667 sp = __get_SP(); 875 sp = tsk->thread.ksp;
668 p = current;
669 }
670 } 876 }
671 877
672 lr = 0; 878 lr = 0;
673 printk("Call Trace:\n"); 879 printk("Call Trace:\n");
674 do { 880 do {
675 if (!validate_sp(sp, p, 112)) 881 if (!validate_sp(sp, tsk, MIN_STACK_FRAME))
676 return; 882 return;
677 883
678 _sp = (unsigned long *) sp; 884 stack = (unsigned long *) sp;
679 newsp = _sp[0]; 885 newsp = stack[0];
680 ip = _sp[2]; 886 ip = stack[FRAME_LR_SAVE];
681 if (!firstframe || ip != lr) { 887 if (!firstframe || ip != lr) {
682 printk("[%016lx] [%016lx] ", sp, ip); 888 printk("["REG"] ["REG"] ", sp, ip);
683 print_symbol("%s", ip); 889 print_symbol("%s", ip);
684 if (firstframe) 890 if (firstframe)
685 printk(" (unreliable)"); 891 printk(" (unreliable)");
@@ -691,8 +897,8 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
691 * See if this is an exception frame. 897 * See if this is an exception frame.
692 * We look for the "regshere" marker in the current frame. 898 * We look for the "regshere" marker in the current frame.
693 */ 899 */
694 if (validate_sp(sp, p, sizeof(struct pt_regs) + 400) 900 if (validate_sp(sp, tsk, INT_FRAME_SIZE)
695 && _sp[12] == 0x7265677368657265ul) { 901 && stack[FRAME_MARKER] == REGS_MARKER) {
696 struct pt_regs *regs = (struct pt_regs *) 902 struct pt_regs *regs = (struct pt_regs *)
697 (sp + STACK_FRAME_OVERHEAD); 903 (sp + STACK_FRAME_OVERHEAD);
698 printk("--- Exception: %lx", regs->trap); 904 printk("--- Exception: %lx", regs->trap);
@@ -708,6 +914,6 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
708 914
709void dump_stack(void) 915void dump_stack(void)
710{ 916{
711 show_stack(current, (unsigned long *)__get_SP()); 917 show_stack(current, NULL);
712} 918}
713EXPORT_SYMBOL(dump_stack); 919EXPORT_SYMBOL(dump_stack);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
new file mode 100644
index 000000000000..2eccd0e159e3
--- /dev/null
+++ b/arch/powerpc/kernel/prom.c
@@ -0,0 +1,2170 @@
1/*
2 * Procedures for creating, accessing and interpreting the device tree.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <linux/module.h>
32
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/lmb.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/system.h>
42#include <asm/mmu.h>
43#include <asm/pgtable.h>
44#include <asm/pci.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/machdep.h>
49#include <asm/pSeries_reconfig.h>
50#include <asm/pci-bridge.h>
51#ifdef CONFIG_PPC64
52#include <asm/systemcfg.h>
53#endif
54
55#ifdef DEBUG
56#define DBG(fmt...) printk(KERN_ERR fmt)
57#else
58#define DBG(fmt...)
59#endif
60
61struct pci_reg_property {
62 struct pci_address addr;
63 u32 size_hi;
64 u32 size_lo;
65};
66
67struct isa_reg_property {
68 u32 space;
69 u32 address;
70 u32 size;
71};
72
73
74typedef int interpret_func(struct device_node *, unsigned long *,
75 int, int, int);
76
77extern struct rtas_t rtas;
78extern struct lmb lmb;
79extern unsigned long klimit;
80
81static int __initdata dt_root_addr_cells;
82static int __initdata dt_root_size_cells;
83
84#ifdef CONFIG_PPC64
85static int __initdata iommu_is_off;
86int __initdata iommu_force_on;
87unsigned long tce_alloc_start, tce_alloc_end;
88#endif
89
90typedef u32 cell_t;
91
92#if 0
93static struct boot_param_header *initial_boot_params __initdata;
94#else
95struct boot_param_header *initial_boot_params;
96#endif
97
98static struct device_node *allnodes = NULL;
99
100/* use when traversing tree through the allnext, child, sibling,
101 * or parent members of struct device_node.
102 */
103static DEFINE_RWLOCK(devtree_lock);
104
105/* export that to outside world */
106struct device_node *of_chosen;
107
108struct device_node *dflt_interrupt_controller;
109int num_interrupt_controllers;
110
111/*
112 * Wrapper for allocating memory for various data that needs to be
113 * attached to device nodes as they are processed at boot or when
114 * added to the device tree later (e.g. DLPAR). At boot there is
115 * already a region reserved so we just increment *mem_start by size;
116 * otherwise we call kmalloc.
117 */
118static void * prom_alloc(unsigned long size, unsigned long *mem_start)
119{
120 unsigned long tmp;
121
122 if (!mem_start)
123 return kmalloc(size, GFP_KERNEL);
124
125 tmp = *mem_start;
126 *mem_start += size;
127 return (void *)tmp;
128}
129
130/*
131 * Find the device_node with a given phandle.
132 */
133static struct device_node * find_phandle(phandle ph)
134{
135 struct device_node *np;
136
137 for (np = allnodes; np != 0; np = np->allnext)
138 if (np->linux_phandle == ph)
139 return np;
140 return NULL;
141}
142
143/*
144 * Find the interrupt parent of a node.
145 */
146static struct device_node * __devinit intr_parent(struct device_node *p)
147{
148 phandle *parp;
149
150 parp = (phandle *) get_property(p, "interrupt-parent", NULL);
151 if (parp == NULL)
152 return p->parent;
153 p = find_phandle(*parp);
154 if (p != NULL)
155 return p;
156 /*
157 * On a powermac booted with BootX, we don't get to know the
158 * phandles for any nodes, so find_phandle will return NULL.
159 * Fortunately these machines only have one interrupt controller
160 * so there isn't in fact any ambiguity. -- paulus
161 */
162 if (num_interrupt_controllers == 1)
163 p = dflt_interrupt_controller;
164 return p;
165}
166
167/*
168 * Find out the size of each entry of the interrupts property
169 * for a node.
170 */
171int __devinit prom_n_intr_cells(struct device_node *np)
172{
173 struct device_node *p;
174 unsigned int *icp;
175
176 for (p = np; (p = intr_parent(p)) != NULL; ) {
177 icp = (unsigned int *)
178 get_property(p, "#interrupt-cells", NULL);
179 if (icp != NULL)
180 return *icp;
181 if (get_property(p, "interrupt-controller", NULL) != NULL
182 || get_property(p, "interrupt-map", NULL) != NULL) {
183 printk("oops, node %s doesn't have #interrupt-cells\n",
184 p->full_name);
185 return 1;
186 }
187 }
188#ifdef DEBUG_IRQ
189 printk("prom_n_intr_cells failed for %s\n", np->full_name);
190#endif
191 return 1;
192}
193
194/*
195 * Map an interrupt from a device up to the platform interrupt
196 * descriptor.
197 */
198static int __devinit map_interrupt(unsigned int **irq, struct device_node **ictrler,
199 struct device_node *np, unsigned int *ints,
200 int nintrc)
201{
202 struct device_node *p, *ipar;
203 unsigned int *imap, *imask, *ip;
204 int i, imaplen, match;
205 int newintrc = 0, newaddrc = 0;
206 unsigned int *reg;
207 int naddrc;
208
209 reg = (unsigned int *) get_property(np, "reg", NULL);
210 naddrc = prom_n_addr_cells(np);
211 p = intr_parent(np);
212 while (p != NULL) {
213 if (get_property(p, "interrupt-controller", NULL) != NULL)
214 /* this node is an interrupt controller, stop here */
215 break;
216 imap = (unsigned int *)
217 get_property(p, "interrupt-map", &imaplen);
218 if (imap == NULL) {
219 p = intr_parent(p);
220 continue;
221 }
222 imask = (unsigned int *)
223 get_property(p, "interrupt-map-mask", NULL);
224 if (imask == NULL) {
225 printk("oops, %s has interrupt-map but no mask\n",
226 p->full_name);
227 return 0;
228 }
229 imaplen /= sizeof(unsigned int);
230 match = 0;
231 ipar = NULL;
232 while (imaplen > 0 && !match) {
233 /* check the child-interrupt field */
234 match = 1;
235 for (i = 0; i < naddrc && match; ++i)
236 match = ((reg[i] ^ imap[i]) & imask[i]) == 0;
237 for (; i < naddrc + nintrc && match; ++i)
238 match = ((ints[i-naddrc] ^ imap[i]) & imask[i]) == 0;
239 imap += naddrc + nintrc;
240 imaplen -= naddrc + nintrc;
241 /* grab the interrupt parent */
242 ipar = find_phandle((phandle) *imap++);
243 --imaplen;
244 if (ipar == NULL && num_interrupt_controllers == 1)
245 /* cope with BootX not giving us phandles */
246 ipar = dflt_interrupt_controller;
247 if (ipar == NULL) {
248 printk("oops, no int parent %x in map of %s\n",
249 imap[-1], p->full_name);
250 return 0;
251 }
252 /* find the parent's # addr and intr cells */
253 ip = (unsigned int *)
254 get_property(ipar, "#interrupt-cells", NULL);
255 if (ip == NULL) {
256 printk("oops, no #interrupt-cells on %s\n",
257 ipar->full_name);
258 return 0;
259 }
260 newintrc = *ip;
261 ip = (unsigned int *)
262 get_property(ipar, "#address-cells", NULL);
263 newaddrc = (ip == NULL)? 0: *ip;
264 imap += newaddrc + newintrc;
265 imaplen -= newaddrc + newintrc;
266 }
267 if (imaplen < 0) {
268 printk("oops, error decoding int-map on %s, len=%d\n",
269 p->full_name, imaplen);
270 return 0;
271 }
272 if (!match) {
273#ifdef DEBUG_IRQ
274 printk("oops, no match in %s int-map for %s\n",
275 p->full_name, np->full_name);
276#endif
277 return 0;
278 }
279 p = ipar;
280 naddrc = newaddrc;
281 nintrc = newintrc;
282 ints = imap - nintrc;
283 reg = ints - naddrc;
284 }
285 if (p == NULL) {
286#ifdef DEBUG_IRQ
287 printk("hmmm, int tree for %s doesn't have ctrler\n",
288 np->full_name);
289#endif
290 return 0;
291 }
292 *irq = ints;
293 *ictrler = p;
294 return nintrc;
295}
296
297static unsigned char map_isa_senses[4] = {
298 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
299 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
300 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
301 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE
302};
303
304static unsigned char map_mpic_senses[4] = {
305 IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE,
306 IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE,
307 /* 2 seems to be used for the 8259 cascade... */
308 IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE,
309 IRQ_SENSE_EDGE | IRQ_POLARITY_NEGATIVE,
310};
311
312static int __devinit finish_node_interrupts(struct device_node *np,
313 unsigned long *mem_start,
314 int measure_only)
315{
316 unsigned int *ints;
317 int intlen, intrcells, intrcount;
318 int i, j, n, sense;
319 unsigned int *irq, virq;
320 struct device_node *ic;
321
322 if (num_interrupt_controllers == 0) {
323 /*
324 * Old machines just have a list of interrupt numbers
325 * and no interrupt-controller nodes.
326 */
327 ints = (unsigned int *) get_property(np, "AAPL,interrupts",
328 &intlen);
329 /* XXX old interpret_pci_props looked in parent too */
330 /* XXX old interpret_macio_props looked for interrupts
331 before AAPL,interrupts */
332 if (ints == NULL)
333 ints = (unsigned int *) get_property(np, "interrupts",
334 &intlen);
335 if (ints == NULL)
336 return 0;
337
338 np->n_intrs = intlen / sizeof(unsigned int);
339 np->intrs = prom_alloc(np->n_intrs * sizeof(np->intrs[0]),
340 mem_start);
341 if (!np->intrs)
342 return -ENOMEM;
343 if (measure_only)
344 return 0;
345
346 for (i = 0; i < np->n_intrs; ++i) {
347 np->intrs[i].line = *ints++;
348 np->intrs[i].sense = IRQ_SENSE_LEVEL
349 | IRQ_POLARITY_NEGATIVE;
350 }
351 return 0;
352 }
353
354 ints = (unsigned int *) get_property(np, "interrupts", &intlen);
355 if (ints == NULL)
356 return 0;
357 intrcells = prom_n_intr_cells(np);
358 intlen /= intrcells * sizeof(unsigned int);
359
360 np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start);
361 if (!np->intrs)
362 return -ENOMEM;
363
364 if (measure_only)
365 return 0;
366
367 intrcount = 0;
368 for (i = 0; i < intlen; ++i, ints += intrcells) {
369 n = map_interrupt(&irq, &ic, np, ints, intrcells);
370 if (n <= 0)
371 continue;
372
373 /* don't map IRQ numbers under a cascaded 8259 controller */
374 if (ic && device_is_compatible(ic, "chrp,iic")) {
375 np->intrs[intrcount].line = irq[0];
376 sense = (n > 1)? (irq[1] & 3): 3;
377 np->intrs[intrcount].sense = map_isa_senses[sense];
378 } else {
379 virq = virt_irq_create_mapping(irq[0]);
380#ifdef CONFIG_PPC64
381 if (virq == NO_IRQ) {
382 printk(KERN_CRIT "Could not allocate interrupt"
383 " number for %s\n", np->full_name);
384 continue;
385 }
386#endif
387 np->intrs[intrcount].line = irq_offset_up(virq);
388 sense = (n > 1)? (irq[1] & 3): 1;
389 np->intrs[intrcount].sense = map_mpic_senses[sense];
390 }
391
392#ifdef CONFIG_PPC64
393 /* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
394 if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
395 char *name = get_property(ic->parent, "name", NULL);
396 if (name && !strcmp(name, "u3"))
397 np->intrs[intrcount].line += 128;
398 else if (!(name && !strcmp(name, "mac-io")))
399 /* ignore other cascaded controllers, such as
400 the k2-sata-root */
401 break;
402 }
403#endif
404 if (n > 2) {
405 printk("hmmm, got %d intr cells for %s:", n,
406 np->full_name);
407 for (j = 0; j < n; ++j)
408 printk(" %d", irq[j]);
409 printk("\n");
410 }
411 ++intrcount;
412 }
413 np->n_intrs = intrcount;
414
415 return 0;
416}
417
418static int __devinit interpret_pci_props(struct device_node *np,
419 unsigned long *mem_start,
420 int naddrc, int nsizec,
421 int measure_only)
422{
423 struct address_range *adr;
424 struct pci_reg_property *pci_addrs;
425 int i, l, n_addrs;
426
427 pci_addrs = (struct pci_reg_property *)
428 get_property(np, "assigned-addresses", &l);
429 if (!pci_addrs)
430 return 0;
431
432 n_addrs = l / sizeof(*pci_addrs);
433
434 adr = prom_alloc(n_addrs * sizeof(*adr), mem_start);
435 if (!adr)
436 return -ENOMEM;
437
438 if (measure_only)
439 return 0;
440
441 np->addrs = adr;
442 np->n_addrs = n_addrs;
443
444 for (i = 0; i < n_addrs; i++) {
445 adr[i].space = pci_addrs[i].addr.a_hi;
446 adr[i].address = pci_addrs[i].addr.a_lo |
447 ((u64)pci_addrs[i].addr.a_mid << 32);
448 adr[i].size = pci_addrs[i].size_lo;
449 }
450
451 return 0;
452}
453
454static int __init interpret_dbdma_props(struct device_node *np,
455 unsigned long *mem_start,
456 int naddrc, int nsizec,
457 int measure_only)
458{
459 struct reg_property32 *rp;
460 struct address_range *adr;
461 unsigned long base_address;
462 int i, l;
463 struct device_node *db;
464
465 base_address = 0;
466 if (!measure_only) {
467 for (db = np->parent; db != NULL; db = db->parent) {
468 if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) {
469 base_address = db->addrs[0].address;
470 break;
471 }
472 }
473 }
474
475 rp = (struct reg_property32 *) get_property(np, "reg", &l);
476 if (rp != 0 && l >= sizeof(struct reg_property32)) {
477 i = 0;
478 adr = (struct address_range *) (*mem_start);
479 while ((l -= sizeof(struct reg_property32)) >= 0) {
480 if (!measure_only) {
481 adr[i].space = 2;
482 adr[i].address = rp[i].address + base_address;
483 adr[i].size = rp[i].size;
484 }
485 ++i;
486 }
487 np->addrs = adr;
488 np->n_addrs = i;
489 (*mem_start) += i * sizeof(struct address_range);
490 }
491
492 return 0;
493}
494
495static int __init interpret_macio_props(struct device_node *np,
496 unsigned long *mem_start,
497 int naddrc, int nsizec,
498 int measure_only)
499{
500 struct reg_property32 *rp;
501 struct address_range *adr;
502 unsigned long base_address;
503 int i, l;
504 struct device_node *db;
505
506 base_address = 0;
507 if (!measure_only) {
508 for (db = np->parent; db != NULL; db = db->parent) {
509 if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) {
510 base_address = db->addrs[0].address;
511 break;
512 }
513 }
514 }
515
516 rp = (struct reg_property32 *) get_property(np, "reg", &l);
517 if (rp != 0 && l >= sizeof(struct reg_property32)) {
518 i = 0;
519 adr = (struct address_range *) (*mem_start);
520 while ((l -= sizeof(struct reg_property32)) >= 0) {
521 if (!measure_only) {
522 adr[i].space = 2;
523 adr[i].address = rp[i].address + base_address;
524 adr[i].size = rp[i].size;
525 }
526 ++i;
527 }
528 np->addrs = adr;
529 np->n_addrs = i;
530 (*mem_start) += i * sizeof(struct address_range);
531 }
532
533 return 0;
534}
535
536static int __init interpret_isa_props(struct device_node *np,
537 unsigned long *mem_start,
538 int naddrc, int nsizec,
539 int measure_only)
540{
541 struct isa_reg_property *rp;
542 struct address_range *adr;
543 int i, l;
544
545 rp = (struct isa_reg_property *) get_property(np, "reg", &l);
546 if (rp != 0 && l >= sizeof(struct isa_reg_property)) {
547 i = 0;
548 adr = (struct address_range *) (*mem_start);
549 while ((l -= sizeof(struct isa_reg_property)) >= 0) {
550 if (!measure_only) {
551 adr[i].space = rp[i].space;
552 adr[i].address = rp[i].address;
553 adr[i].size = rp[i].size;
554 }
555 ++i;
556 }
557 np->addrs = adr;
558 np->n_addrs = i;
559 (*mem_start) += i * sizeof(struct address_range);
560 }
561
562 return 0;
563}
564
565static int __init interpret_root_props(struct device_node *np,
566 unsigned long *mem_start,
567 int naddrc, int nsizec,
568 int measure_only)
569{
570 struct address_range *adr;
571 int i, l;
572 unsigned int *rp;
573 int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
574
575 rp = (unsigned int *) get_property(np, "reg", &l);
576 if (rp != 0 && l >= rpsize) {
577 i = 0;
578 adr = (struct address_range *) (*mem_start);
579 while ((l -= rpsize) >= 0) {
580 if (!measure_only) {
581 adr[i].space = 0;
582 adr[i].address = rp[naddrc - 1];
583 adr[i].size = rp[naddrc + nsizec - 1];
584 }
585 ++i;
586 rp += naddrc + nsizec;
587 }
588 np->addrs = adr;
589 np->n_addrs = i;
590 (*mem_start) += i * sizeof(struct address_range);
591 }
592
593 return 0;
594}
595
596static int __devinit finish_node(struct device_node *np,
597 unsigned long *mem_start,
598 interpret_func *ifunc,
599 int naddrc, int nsizec,
600 int measure_only)
601{
602 struct device_node *child;
603 int *ip, rc = 0;
604
605 /* get the device addresses and interrupts */
606 if (ifunc != NULL)
607 rc = ifunc(np, mem_start, naddrc, nsizec, measure_only);
608 if (rc)
609 goto out;
610
611 rc = finish_node_interrupts(np, mem_start, measure_only);
612 if (rc)
613 goto out;
614
615 /* Look for #address-cells and #size-cells properties. */
616 ip = (int *) get_property(np, "#address-cells", NULL);
617 if (ip != NULL)
618 naddrc = *ip;
619 ip = (int *) get_property(np, "#size-cells", NULL);
620 if (ip != NULL)
621 nsizec = *ip;
622
623 if (!strcmp(np->name, "device-tree") || np->parent == NULL)
624 ifunc = interpret_root_props;
625 else if (np->type == 0)
626 ifunc = NULL;
627 else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci"))
628 ifunc = interpret_pci_props;
629 else if (!strcmp(np->type, "dbdma"))
630 ifunc = interpret_dbdma_props;
631 else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props)
632 ifunc = interpret_macio_props;
633 else if (!strcmp(np->type, "isa"))
634 ifunc = interpret_isa_props;
635 else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3"))
636 ifunc = interpret_root_props;
637 else if (!((ifunc == interpret_dbdma_props
638 || ifunc == interpret_macio_props)
639 && (!strcmp(np->type, "escc")
640 || !strcmp(np->type, "media-bay"))))
641 ifunc = NULL;
642
643 for (child = np->child; child != NULL; child = child->sibling) {
644 rc = finish_node(child, mem_start, ifunc,
645 naddrc, nsizec, measure_only);
646 if (rc)
647 goto out;
648 }
649out:
650 return rc;
651}
652
653static void __init scan_interrupt_controllers(void)
654{
655 struct device_node *np;
656 int n = 0;
657 char *name, *ic;
658 int iclen;
659
660 for (np = allnodes; np != NULL; np = np->allnext) {
661 ic = get_property(np, "interrupt-controller", &iclen);
662 name = get_property(np, "name", NULL);
663 /* checking iclen makes sure we don't get a false
664 match on /chosen.interrupt_controller */
665 if ((name != NULL
666 && strcmp(name, "interrupt-controller") == 0)
667 || (ic != NULL && iclen == 0
668 && strcmp(name, "AppleKiwi"))) {
669 if (n == 0)
670 dflt_interrupt_controller = np;
671 ++n;
672 }
673 }
674 num_interrupt_controllers = n;
675}
676
677/**
678 * finish_device_tree is called once things are running normally
679 * (i.e. with text and data mapped to the address they were linked at).
680 * It traverses the device tree and fills in some of the additional,
681 * fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
682 * mapping is also initialized at this point.
683 */
684void __init finish_device_tree(void)
685{
686 unsigned long start, end, size = 0;
687
688 DBG(" -> finish_device_tree\n");
689
690#ifdef CONFIG_PPC64
691 /* Initialize virtual IRQ map */
692 virt_irq_init();
693#endif
694 scan_interrupt_controllers();
695
696 /*
697 * Finish device-tree (pre-parsing some properties etc...)
698 * We do this in 2 passes. One with "measure_only" set, which
699 * will only measure the amount of memory needed, then we can
700 * allocate that memory, and call finish_node again. However,
701 * we must be careful as most routines will fail nowadays when
702 * prom_alloc() returns 0, so we must make sure our first pass
703 * doesn't start at 0. We pre-initialize size to 16 for that
704 * reason and then remove those additional 16 bytes
705 */
706 size = 16;
707 finish_node(allnodes, &size, NULL, 0, 0, 1);
708 size -= 16;
709 end = start = (unsigned long) __va(lmb_alloc(size, 128));
710 finish_node(allnodes, &end, NULL, 0, 0, 0);
711 BUG_ON(end != start + size);
712
713 DBG(" <- finish_device_tree\n");
714}
715
716static inline char *find_flat_dt_string(u32 offset)
717{
718 return ((char *)initial_boot_params) +
719 initial_boot_params->off_dt_strings + offset;
720}
721
722/**
723 * This function is used to scan the flattened device-tree, it is
724 * used to extract the memory informations at boot before we can
725 * unflatten the tree
726 */
727static int __init scan_flat_dt(int (*it)(unsigned long node,
728 const char *uname, int depth,
729 void *data),
730 void *data)
731{
732 unsigned long p = ((unsigned long)initial_boot_params) +
733 initial_boot_params->off_dt_struct;
734 int rc = 0;
735 int depth = -1;
736
737 do {
738 u32 tag = *((u32 *)p);
739 char *pathp;
740
741 p += 4;
742 if (tag == OF_DT_END_NODE) {
743 depth --;
744 continue;
745 }
746 if (tag == OF_DT_NOP)
747 continue;
748 if (tag == OF_DT_END)
749 break;
750 if (tag == OF_DT_PROP) {
751 u32 sz = *((u32 *)p);
752 p += 8;
753 if (initial_boot_params->version < 0x10)
754 p = _ALIGN(p, sz >= 8 ? 8 : 4);
755 p += sz;
756 p = _ALIGN(p, 4);
757 continue;
758 }
759 if (tag != OF_DT_BEGIN_NODE) {
760 printk(KERN_WARNING "Invalid tag %x scanning flattened"
761 " device tree !\n", tag);
762 return -EINVAL;
763 }
764 depth++;
765 pathp = (char *)p;
766 p = _ALIGN(p + strlen(pathp) + 1, 4);
767 if ((*pathp) == '/') {
768 char *lp, *np;
769 for (lp = NULL, np = pathp; *np; np++)
770 if ((*np) == '/')
771 lp = np+1;
772 if (lp != NULL)
773 pathp = lp;
774 }
775 rc = it(p, pathp, depth, data);
776 if (rc != 0)
777 break;
778 } while(1);
779
780 return rc;
781}
782
783/**
784 * This function can be used within scan_flattened_dt callback to get
785 * access to properties
786 */
787static void* __init get_flat_dt_prop(unsigned long node, const char *name,
788 unsigned long *size)
789{
790 unsigned long p = node;
791
792 do {
793 u32 tag = *((u32 *)p);
794 u32 sz, noff;
795 const char *nstr;
796
797 p += 4;
798 if (tag == OF_DT_NOP)
799 continue;
800 if (tag != OF_DT_PROP)
801 return NULL;
802
803 sz = *((u32 *)p);
804 noff = *((u32 *)(p + 4));
805 p += 8;
806 if (initial_boot_params->version < 0x10)
807 p = _ALIGN(p, sz >= 8 ? 8 : 4);
808
809 nstr = find_flat_dt_string(noff);
810 if (nstr == NULL) {
811 printk(KERN_WARNING "Can't find property index"
812 " name !\n");
813 return NULL;
814 }
815 if (strcmp(name, nstr) == 0) {
816 if (size)
817 *size = sz;
818 return (void *)p;
819 }
820 p += sz;
821 p = _ALIGN(p, 4);
822 } while(1);
823}
824
825static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
826 unsigned long align)
827{
828 void *res;
829
830 *mem = _ALIGN(*mem, align);
831 res = (void *)*mem;
832 *mem += size;
833
834 return res;
835}
836
/* Convert one flat-tree node (and, recursively, its children) into a
 * struct device_node.
 *
 * @mem:       bump-allocation cursor into the destination memory block
 * @p:         in/out offset into the flat-tree structure block
 * @dad:       parent device_node, or NULL for the root
 * @allnextpp: tail pointer of the global allnodes list; when NULL this is
 *             the first "sizing" pass and nothing is written, only the
 *             required memory is accounted for in the returned cursor
 * @fpsize:    accumulated length of the parent's full path (version
 *             >= 0x10 trees only store unit names, so the full path is
 *             rebuilt here)
 *
 * Returns the updated allocation cursor.
 */
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}


	/* node struct and its full_name share one allocation */
	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	/* parse this node's properties */
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		/* flat-tree versions < 0x10 align 8-byte values */
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			/* "linux,phandle" provides the default phandle;
			 * "ibm,phandle" overrides it when present */
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			/* value points straight into the flat blob:
			 * no copy is made */
			pp->name = pname;
			pp->length = sz;
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		/* ps: start of the last path component; pa: the '@' unit
		 * address separator (if any) */
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = (unsigned char *)(pp + 1);
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp, pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = get_property(np, "name", NULL);
		np->type = get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	/* recurse into any child nodes */
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}
1017
1018
1019/**
1020 * unflattens the device-tree passed by the firmware, creating the
1021 * tree of struct device_node. It also fills the "name" and "type"
1022 * pointers of the nodes so the normal device-tree walking functions
1023 * can be used (this used to be done by finish_device_tree)
1024 */
1025void __init unflatten_device_tree(void)
1026{
1027 unsigned long start, mem, size;
1028 struct device_node **allnextp = &allnodes;
1029 char *p = NULL;
1030 int l = 0;
1031
1032 DBG(" -> unflatten_device_tree()\n");
1033
1034 /* First pass, scan for size */
1035 start = ((unsigned long)initial_boot_params) +
1036 initial_boot_params->off_dt_struct;
1037 size = unflatten_dt_node(0, &start, NULL, NULL, 0);
1038 size = (size | 3) + 1;
1039
1040 DBG(" size is %lx, allocating...\n", size);
1041
1042 /* Allocate memory for the expanded device tree */
1043 mem = lmb_alloc(size + 4, __alignof__(struct device_node));
1044 if (!mem) {
1045 DBG("Couldn't allocate memory with lmb_alloc()!\n");
1046 panic("Couldn't allocate memory with lmb_alloc()!\n");
1047 }
1048 mem = (unsigned long) __va(mem);
1049
1050 ((u32 *)mem)[size / 4] = 0xdeadbeef;
1051
1052 DBG(" unflattening %lx...\n", mem);
1053
1054 /* Second pass, do actual unflattening */
1055 start = ((unsigned long)initial_boot_params) +
1056 initial_boot_params->off_dt_struct;
1057 unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
1058 if (*((u32 *)start) != OF_DT_END)
1059 printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
1060 if (((u32 *)mem)[size / 4] != 0xdeadbeef)
1061 printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
1062 ((u32 *)mem)[size / 4] );
1063 *allnextp = NULL;
1064
1065 /* Get pointer to OF "/chosen" node for use everywhere */
1066 of_chosen = of_find_node_by_path("/chosen");
1067 if (of_chosen == NULL)
1068 of_chosen = of_find_node_by_path("/chosen@0");
1069
1070 /* Retreive command line */
1071 if (of_chosen != NULL) {
1072 p = (char *)get_property(of_chosen, "bootargs", &l);
1073 if (p != NULL && l > 0)
1074 strlcpy(cmd_line, p, min(l, COMMAND_LINE_SIZE));
1075 }
1076#ifdef CONFIG_CMDLINE
1077 if (l == 0 || (l == 1 && (*p) == 0))
1078 strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1079#endif /* CONFIG_CMDLINE */
1080
1081 DBG("Command line is: %s\n", cmd_line);
1082
1083 DBG(" <- unflatten_device_tree()\n");
1084}
1085
1086
/* scan_flat_dt() callback: extract early CPU-related information
 * (hash-table size, boot CPU id, Altivec/SMT features) from each
 * "cpu" node of the flattened tree.  Always returns 0 so every CPU
 * node is visited.
 */
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth, void *data)
{
	char *type = get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

#ifdef CONFIG_PPC_PSERIES
	/* On LPAR, look for the first ibm,pft-size property for the hash table size
	 */
	if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
		u32 *pft_size;
		pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
		if (pft_size != NULL) {
			/* pft_size[0] is the NUMA CEC cookie */
			ppc64_pft_size = pft_size[1];
		}
	}
#endif

	boot_cpuid = 0;
	boot_cpuid_phys = 0;
	if (initial_boot_params && initial_boot_params->version >= 2) {
		/* version 2 of the kexec param format adds the phys cpuid
		 * of booted proc.
		 */
		boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
	} else {
		/* Check if it's the boot-cpu, set it's hw index now */
		if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
			prop = get_flat_dt_prop(node, "reg", NULL);
			if (prop != NULL)
				boot_cpuid_phys = *prop;
		}
	}
	set_hard_smp_processor_id(0, boot_cpuid_phys);

#ifdef CONFIG_ALTIVEC
	/* Check if we have a VMX and eventually update CPU features */
	prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
	if (prop && (*prop) > 0) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}

	/* Same goes for Apple's "altivec" property */
	prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
	if (prop) {
		cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
		cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
	}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_PSERIES
	/*
	 * Check for an SMT capable CPU and set the CPU feature. We do
	 * this by looking at the size of the ibm,ppc-interrupt-server#s
	 * property
	 */
	prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
				       &size);
	cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	/* more than one interrupt server means more than one thread */
	if (prop && ((size / sizeof(u32)) > 1))
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}
1159
/* scan_flat_dt() callback: pull early boot parameters (platform type,
 * iommu flags, memory limit, TCE window, RTAS location) out of the
 * /chosen node.  Returns 1 once /chosen has been processed so the
 * scan stops, 0 for every other node.
 */
static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	u32 *prop;
	unsigned long *lprop;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	/* get platform type */
	prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
	if (prop == NULL)
		return 0;
#ifdef CONFIG_PPC64
	systemcfg->platform = *prop;
#else
#ifdef CONFIG_PPC_MULTIPLATFORM
	_machine = *prop;
#endif
#endif

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_PPC_RTAS
	/* To help early debugging via the front panel, we retreive a minimal
	 * set of RTAS infos now if available
	 */
	{
		u64 *basep, *entryp;

		basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
		entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
		prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
		/* only use RTAS if all three properties are present */
		if (basep && entryp && prop) {
			rtas.base = *basep;
			rtas.entry = *entryp;
			rtas.size = *prop;
		}
	}
#endif /* CONFIG_PPC_RTAS */

	/* break now */
	return 1;
}
1226
1227static int __init early_init_dt_scan_root(unsigned long node,
1228 const char *uname, int depth, void *data)
1229{
1230 u32 *prop;
1231
1232 if (depth != 0)
1233 return 0;
1234
1235 prop = get_flat_dt_prop(node, "#size-cells", NULL);
1236 dt_root_size_cells = (prop == NULL) ? 1 : *prop;
1237 DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
1238
1239 prop = get_flat_dt_prop(node, "#address-cells", NULL);
1240 dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
1241 DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1242
1243 /* break now */
1244 return 1;
1245}
1246
1247static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
1248{
1249 cell_t *p = *cellp;
1250 unsigned long r;
1251
1252 /* Ignore more than 2 cells */
1253 while (s > sizeof(unsigned long) / 4) {
1254 p++;
1255 s--;
1256 }
1257 r = *p++;
1258#ifdef CONFIG_PPC64
1259 if (s > 1) {
1260 r <<= 32;
1261 r |= *(p++);
1262 s--;
1263 }
1264#endif
1265
1266 *cellp = p;
1267 return r;
1268}
1269
1270
1271static int __init early_init_dt_scan_memory(unsigned long node,
1272 const char *uname, int depth, void *data)
1273{
1274 char *type = get_flat_dt_prop(node, "device_type", NULL);
1275 cell_t *reg, *endp;
1276 unsigned long l;
1277
1278 /* We are scanning "memory" nodes only */
1279 if (type == NULL || strcmp(type, "memory") != 0)
1280 return 0;
1281
1282 reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
1283 if (reg == NULL)
1284 return 0;
1285
1286 endp = reg + (l / sizeof(cell_t));
1287
1288 DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
1289 uname, l, reg[0], reg[1], reg[2], reg[3]);
1290
1291 while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1292 unsigned long base, size;
1293
1294 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1295 size = dt_mem_next_cell(dt_root_size_cells, &reg);
1296
1297 if (size == 0)
1298 continue;
1299 DBG(" - %lx , %lx\n", base, size);
1300#ifdef CONFIG_PPC64
1301 if (iommu_is_off) {
1302 if (base >= 0x80000000ul)
1303 continue;
1304 if ((base + size) > 0x80000000ul)
1305 size = 0x80000000ul - base;
1306 }
1307#endif
1308 lmb_add(base, size);
1309 }
1310 return 0;
1311}
1312
1313static void __init early_reserve_mem(void)
1314{
1315 unsigned long base, size;
1316 unsigned long *reserve_map;
1317
1318 reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
1319 initial_boot_params->off_mem_rsvmap);
1320 while (1) {
1321 base = *(reserve_map++);
1322 size = *(reserve_map++);
1323 if (size == 0)
1324 break;
1325 DBG("reserving: %lx -> %lx\n", base, size);
1326 lmb_reserve(base, size);
1327 }
1328
1329#if 0
1330 DBG("memory reserved, lmbs :\n");
1331 lmb_dump_all();
1332#endif
1333}
1334
/* Very early entry point: parse the flattened device-tree @params to
 * set up the boot command line, LMB memory map and CPU features,
 * before the tree is unflattened.
 */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	/* root must be scanned first: it provides the cell sizes the
	 * memory scan depends on */
	scan_flat_dt(early_init_dt_scan_root, NULL);
	scan_flat_dt(early_init_dt_scan_memory, NULL);
	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();
#ifdef CONFIG_PPC64
	systemcfg->physicalMemorySize = lmb_phys_mem_size();
#endif
	/* protect the kernel text/data from allocation */
	lmb_reserve(0, __pa(klimit));

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	early_reserve_mem();

	DBG("Scanning CPUs ...\n");

	/* Retreive hash table size from flattened tree plus other
	 * CPU related informations (altivec support, boot CPU ID, ...)
	 */
	scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}
1373
1374#undef printk
1375
1376int
1377prom_n_addr_cells(struct device_node* np)
1378{
1379 int* ip;
1380 do {
1381 if (np->parent)
1382 np = np->parent;
1383 ip = (int *) get_property(np, "#address-cells", NULL);
1384 if (ip != NULL)
1385 return *ip;
1386 } while (np->parent);
1387 /* No #address-cells property for the root node, default to 1 */
1388 return 1;
1389}
1390
1391int
1392prom_n_size_cells(struct device_node* np)
1393{
1394 int* ip;
1395 do {
1396 if (np->parent)
1397 np = np->parent;
1398 ip = (int *) get_property(np, "#size-cells", NULL);
1399 if (ip != NULL)
1400 return *ip;
1401 } while (np->parent);
1402 /* No #size-cells property for the root node, default to 1 */
1403 return 1;
1404}
1405
1406/**
1407 * Work out the sense (active-low level / active-high edge)
1408 * of each interrupt from the device tree.
1409 */
1410void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
1411{
1412 struct device_node *np;
1413 int i, j;
1414
1415 /* default to level-triggered */
1416 memset(senses, IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE, max - off);
1417
1418 for (np = allnodes; np != 0; np = np->allnext) {
1419 for (j = 0; j < np->n_intrs; j++) {
1420 i = np->intrs[j].line;
1421 if (i >= off && i < max)
1422 senses[i-off] = np->intrs[j].sense;
1423 }
1424 }
1425}
1426
1427/**
1428 * Construct and return a list of the device_nodes with a given name.
1429 */
1430struct device_node *find_devices(const char *name)
1431{
1432 struct device_node *head, **prevp, *np;
1433
1434 prevp = &head;
1435 for (np = allnodes; np != 0; np = np->allnext) {
1436 if (np->name != 0 && strcasecmp(np->name, name) == 0) {
1437 *prevp = np;
1438 prevp = &np->next;
1439 }
1440 }
1441 *prevp = NULL;
1442 return head;
1443}
1444EXPORT_SYMBOL(find_devices);
1445
1446/**
1447 * Construct and return a list of the device_nodes with a given type.
1448 */
1449struct device_node *find_type_devices(const char *type)
1450{
1451 struct device_node *head, **prevp, *np;
1452
1453 prevp = &head;
1454 for (np = allnodes; np != 0; np = np->allnext) {
1455 if (np->type != 0 && strcasecmp(np->type, type) == 0) {
1456 *prevp = np;
1457 prevp = &np->next;
1458 }
1459 }
1460 *prevp = NULL;
1461 return head;
1462}
1463EXPORT_SYMBOL(find_type_devices);
1464
1465/**
1466 * Returns all nodes linked together
1467 */
1468struct device_node *find_all_nodes(void)
1469{
1470 struct device_node *head, **prevp, *np;
1471
1472 prevp = &head;
1473 for (np = allnodes; np != 0; np = np->allnext) {
1474 *prevp = np;
1475 prevp = &np->next;
1476 }
1477 *prevp = NULL;
1478 return head;
1479}
1480EXPORT_SYMBOL(find_all_nodes);
1481
1482/** Checks if the given "compat" string matches one of the strings in
1483 * the device's "compatible" property
1484 */
1485int device_is_compatible(struct device_node *device, const char *compat)
1486{
1487 const char* cp;
1488 int cplen, l;
1489
1490 cp = (char *) get_property(device, "compatible", &cplen);
1491 if (cp == NULL)
1492 return 0;
1493 while (cplen > 0) {
1494 if (strncasecmp(cp, compat, strlen(compat)) == 0)
1495 return 1;
1496 l = strlen(cp) + 1;
1497 cp += l;
1498 cplen -= l;
1499 }
1500
1501 return 0;
1502}
1503EXPORT_SYMBOL(device_is_compatible);
1504
1505
1506/**
1507 * Indicates whether the root node has a given value in its
1508 * compatible property.
1509 */
1510int machine_is_compatible(const char *compat)
1511{
1512 struct device_node *root;
1513 int rc = 0;
1514
1515 root = of_find_node_by_path("/");
1516 if (root) {
1517 rc = device_is_compatible(root, compat);
1518 of_node_put(root);
1519 }
1520 return rc;
1521}
1522EXPORT_SYMBOL(machine_is_compatible);
1523
1524/**
1525 * Construct and return a list of the device_nodes with a given type
1526 * and compatible property.
1527 */
1528struct device_node *find_compatible_devices(const char *type,
1529 const char *compat)
1530{
1531 struct device_node *head, **prevp, *np;
1532
1533 prevp = &head;
1534 for (np = allnodes; np != 0; np = np->allnext) {
1535 if (type != NULL
1536 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1537 continue;
1538 if (device_is_compatible(np, compat)) {
1539 *prevp = np;
1540 prevp = &np->next;
1541 }
1542 }
1543 *prevp = NULL;
1544 return head;
1545}
1546EXPORT_SYMBOL(find_compatible_devices);
1547
1548/**
1549 * Find the device_node with a given full_name.
1550 */
1551struct device_node *find_path_device(const char *path)
1552{
1553 struct device_node *np;
1554
1555 for (np = allnodes; np != 0; np = np->allnext)
1556 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0)
1557 return np;
1558 return NULL;
1559}
1560EXPORT_SYMBOL(find_path_device);
1561
1562/*******
1563 *
1564 * New implementation of the OF "find" APIs, return a refcounted
1565 * object, call of_node_put() when done. The device tree and list
1566 * are protected by a rw_lock.
1567 *
1568 * Note that property management will need some locking as well,
1569 * this isn't dealt with yet.
1570 *
1571 *******/
1572
1573/**
1574 * of_find_node_by_name - Find a node by its "name" property
1575 * @from: The node to start searching from or NULL, the node
1576 * you pass will not be searched, only the next one
1577 * will; typically, you pass what the previous call
1578 * returned. of_node_put() will be called on it
1579 * @name: The name string to match against
1580 *
1581 * Returns a node pointer with refcount incremented, use
1582 * of_node_put() on it when done.
1583 */
1584struct device_node *of_find_node_by_name(struct device_node *from,
1585 const char *name)
1586{
1587 struct device_node *np;
1588
1589 read_lock(&devtree_lock);
1590 np = from ? from->allnext : allnodes;
1591 for (; np != 0; np = np->allnext)
1592 if (np->name != 0 && strcasecmp(np->name, name) == 0
1593 && of_node_get(np))
1594 break;
1595 if (from)
1596 of_node_put(from);
1597 read_unlock(&devtree_lock);
1598 return np;
1599}
1600EXPORT_SYMBOL(of_find_node_by_name);
1601
1602/**
1603 * of_find_node_by_type - Find a node by its "device_type" property
1604 * @from: The node to start searching from or NULL, the node
1605 * you pass will not be searched, only the next one
1606 * will; typically, you pass what the previous call
1607 * returned. of_node_put() will be called on it
1608 * @name: The type string to match against
1609 *
1610 * Returns a node pointer with refcount incremented, use
1611 * of_node_put() on it when done.
1612 */
1613struct device_node *of_find_node_by_type(struct device_node *from,
1614 const char *type)
1615{
1616 struct device_node *np;
1617
1618 read_lock(&devtree_lock);
1619 np = from ? from->allnext : allnodes;
1620 for (; np != 0; np = np->allnext)
1621 if (np->type != 0 && strcasecmp(np->type, type) == 0
1622 && of_node_get(np))
1623 break;
1624 if (from)
1625 of_node_put(from);
1626 read_unlock(&devtree_lock);
1627 return np;
1628}
1629EXPORT_SYMBOL(of_find_node_by_type);
1630
1631/**
1632 * of_find_compatible_node - Find a node based on type and one of the
1633 * tokens in its "compatible" property
1634 * @from: The node to start searching from or NULL, the node
1635 * you pass will not be searched, only the next one
1636 * will; typically, you pass what the previous call
1637 * returned. of_node_put() will be called on it
1638 * @type: The type string to match "device_type" or NULL to ignore
1639 * @compatible: The string to match to one of the tokens in the device
1640 * "compatible" list.
1641 *
1642 * Returns a node pointer with refcount incremented, use
1643 * of_node_put() on it when done.
1644 */
1645struct device_node *of_find_compatible_node(struct device_node *from,
1646 const char *type, const char *compatible)
1647{
1648 struct device_node *np;
1649
1650 read_lock(&devtree_lock);
1651 np = from ? from->allnext : allnodes;
1652 for (; np != 0; np = np->allnext) {
1653 if (type != NULL
1654 && !(np->type != 0 && strcasecmp(np->type, type) == 0))
1655 continue;
1656 if (device_is_compatible(np, compatible) && of_node_get(np))
1657 break;
1658 }
1659 if (from)
1660 of_node_put(from);
1661 read_unlock(&devtree_lock);
1662 return np;
1663}
1664EXPORT_SYMBOL(of_find_compatible_node);
1665
1666/**
1667 * of_find_node_by_path - Find a node matching a full OF path
1668 * @path: The full path to match
1669 *
1670 * Returns a node pointer with refcount incremented, use
1671 * of_node_put() on it when done.
1672 */
1673struct device_node *of_find_node_by_path(const char *path)
1674{
1675 struct device_node *np = allnodes;
1676
1677 read_lock(&devtree_lock);
1678 for (; np != 0; np = np->allnext) {
1679 if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
1680 && of_node_get(np))
1681 break;
1682 }
1683 read_unlock(&devtree_lock);
1684 return np;
1685}
1686EXPORT_SYMBOL(of_find_node_by_path);
1687
1688/**
1689 * of_find_node_by_phandle - Find a node given a phandle
1690 * @handle: phandle of the node to find
1691 *
1692 * Returns a node pointer with refcount incremented, use
1693 * of_node_put() on it when done.
1694 */
1695struct device_node *of_find_node_by_phandle(phandle handle)
1696{
1697 struct device_node *np;
1698
1699 read_lock(&devtree_lock);
1700 for (np = allnodes; np != 0; np = np->allnext)
1701 if (np->linux_phandle == handle)
1702 break;
1703 if (np)
1704 of_node_get(np);
1705 read_unlock(&devtree_lock);
1706 return np;
1707}
1708EXPORT_SYMBOL(of_find_node_by_phandle);
1709
1710/**
1711 * of_find_all_nodes - Get next node in global list
1712 * @prev: Previous node or NULL to start iteration
1713 * of_node_put() will be called on it
1714 *
1715 * Returns a node pointer with refcount incremented, use
1716 * of_node_put() on it when done.
1717 */
1718struct device_node *of_find_all_nodes(struct device_node *prev)
1719{
1720 struct device_node *np;
1721
1722 read_lock(&devtree_lock);
1723 np = prev ? prev->allnext : allnodes;
1724 for (; np != 0; np = np->allnext)
1725 if (of_node_get(np))
1726 break;
1727 if (prev)
1728 of_node_put(prev);
1729 read_unlock(&devtree_lock);
1730 return np;
1731}
1732EXPORT_SYMBOL(of_find_all_nodes);
1733
1734/**
1735 * of_get_parent - Get a node's parent if any
1736 * @node: Node to get parent
1737 *
1738 * Returns a node pointer with refcount incremented, use
1739 * of_node_put() on it when done.
1740 */
1741struct device_node *of_get_parent(const struct device_node *node)
1742{
1743 struct device_node *np;
1744
1745 if (!node)
1746 return NULL;
1747
1748 read_lock(&devtree_lock);
1749 np = of_node_get(node->parent);
1750 read_unlock(&devtree_lock);
1751 return np;
1752}
1753EXPORT_SYMBOL(of_get_parent);
1754
1755/**
1756 * of_get_next_child - Iterate a node childs
1757 * @node: parent node
1758 * @prev: previous child of the parent node, or NULL to get first
1759 *
1760 * Returns a node pointer with refcount incremented, use
1761 * of_node_put() on it when done.
1762 */
1763struct device_node *of_get_next_child(const struct device_node *node,
1764 struct device_node *prev)
1765{
1766 struct device_node *next;
1767
1768 read_lock(&devtree_lock);
1769 next = prev ? prev->sibling : node->child;
1770 for (; next != 0; next = next->sibling)
1771 if (of_node_get(next))
1772 break;
1773 if (prev)
1774 of_node_put(prev);
1775 read_unlock(&devtree_lock);
1776 return next;
1777}
1778EXPORT_SYMBOL(of_get_next_child);
1779
1780/**
1781 * of_node_get - Increment refcount of a node
1782 * @node: Node to inc refcount, NULL is supported to
1783 * simplify writing of callers
1784 *
1785 * Returns node.
1786 */
1787struct device_node *of_node_get(struct device_node *node)
1788{
1789 if (node)
1790 kref_get(&node->kref);
1791 return node;
1792}
1793EXPORT_SYMBOL(of_node_get);
1794
/* Recover the device_node embedding the given kref (see of_node_release). */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}
1799
1800/**
1801 * of_node_release - release a dynamically allocated node
1802 * @kref: kref element of the node to be released
1803 *
1804 * In of_node_put() this function is passed to kref_put()
1805 * as the destructor.
1806 */
1807static void of_node_release(struct kref *kref)
1808{
1809 struct device_node *node = kref_to_device_node(kref);
1810 struct property *prop = node->properties;
1811
1812 if (!OF_IS_DYNAMIC(node))
1813 return;
1814 while (prop) {
1815 struct property *next = prop->next;
1816 kfree(prop->name);
1817 kfree(prop->value);
1818 kfree(prop);
1819 prop = next;
1820 }
1821 kfree(node->intrs);
1822 kfree(node->addrs);
1823 kfree(node->full_name);
1824 kfree(node->data);
1825 kfree(node);
1826}
1827
1828/**
1829 * of_node_put - Decrement refcount of a node
1830 * @node: Node to dec refcount, NULL is supported to
1831 * simplify writing of callers
1832 *
1833 */
1834void of_node_put(struct device_node *node)
1835{
1836 if (node)
1837 kref_put(&node->kref, of_node_release);
1838}
1839EXPORT_SYMBOL(of_node_put);
1840
1841/*
1842 * Plug a device node into the tree and global list.
1843 */
1844void of_attach_node(struct device_node *np)
1845{
1846 write_lock(&devtree_lock);
1847 np->sibling = np->parent->child;
1848 np->allnext = allnodes;
1849 np->parent->child = np;
1850 allnodes = np;
1851 write_unlock(&devtree_lock);
1852}
1853
/*
 * "Unplug" a node from the device tree. The caller must hold
 * a reference to the node. The memory associated with the node
 * is not freed until its refcount goes to zero.
 *
 * NOTE(review): both scans assume @np really is on the allnodes list
 * and on its parent's child list; otherwise they run off the end.
 */
void of_detach_node(const struct device_node *np)
{
	struct device_node *parent;

	write_lock(&devtree_lock);

	parent = np->parent;

	/* Unlink from the global allnodes list (singly linked, so a
	 * linear scan is needed unless we are the head). */
	if (allnodes == np)
		allnodes = np->allnext;
	else {
		struct device_node *prev;
		for (prev = allnodes;
		     prev->allnext != np;
		     prev = prev->allnext)
			;
		prev->allnext = np->allnext;
	}

	/* Unlink from the parent's sibling-linked child list. */
	if (parent->child == np)
		parent->child = np->sibling;
	else {
		struct device_node *prevsib;
		for (prevsib = np->parent->child;
		     prevsib->sibling != np;
		     prevsib = prevsib->sibling)
			;
		prevsib->sibling = np->sibling;
	}

	write_unlock(&devtree_lock);
}
1891
1892#ifdef CONFIG_PPC_PSERIES
1893/*
1894 * Fix up the uninitialized fields in a new device node:
1895 * name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
1896 *
1897 * A lot of boot-time code is duplicated here, because functions such
1898 * as finish_node_interrupts, interpret_pci_props, etc. cannot use the
1899 * slab allocator.
1900 *
1901 * This should probably be split up into smaller chunks.
1902 */
1903
1904static int of_finish_dynamic_node(struct device_node *node,
1905 unsigned long *unused1, int unused2,
1906 int unused3, int unused4)
1907{
1908 struct device_node *parent = of_get_parent(node);
1909 int err = 0;
1910 phandle *ibm_phandle;
1911
1912 node->name = get_property(node, "name", NULL);
1913 node->type = get_property(node, "device_type", NULL);
1914
1915 if (!parent) {
1916 err = -ENODEV;
1917 goto out;
1918 }
1919
1920 /* We don't support that function on PowerMac, at least
1921 * not yet
1922 */
1923 if (systemcfg->platform == PLATFORM_POWERMAC)
1924 return -ENODEV;
1925
1926 /* fix up new node's linux_phandle field */
1927 if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL)))
1928 node->linux_phandle = *ibm_phandle;
1929
1930out:
1931 of_node_put(parent);
1932 return err;
1933}
1934
1935static int prom_reconfig_notifier(struct notifier_block *nb,
1936 unsigned long action, void *node)
1937{
1938 int err;
1939
1940 switch (action) {
1941 case PSERIES_RECONFIG_ADD:
1942 err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0);
1943 if (err < 0) {
1944 printk(KERN_ERR "finish_node returned %d\n", err);
1945 err = NOTIFY_BAD;
1946 }
1947 break;
1948 default:
1949 err = NOTIFY_DONE;
1950 break;
1951 }
1952 return err;
1953}
1954
/* Notifier block for pSeries dynamic-reconfiguration events. */
static struct notifier_block prom_reconfig_nb = {
	.notifier_call = prom_reconfig_notifier,
	.priority = 10, /* This one needs to run first */
};

/* Register the reconfig notifier at boot time. */
static int __init prom_reconfig_setup(void)
{
	return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
}
__initcall(prom_reconfig_setup);
1965#endif
1966
1967/*
1968 * Find a property with a given name for a given node
1969 * and return the value.
1970 */
1971unsigned char *get_property(struct device_node *np, const char *name,
1972 int *lenp)
1973{
1974 struct property *pp;
1975
1976 for (pp = np->properties; pp != 0; pp = pp->next)
1977 if (strcmp(pp->name, name) == 0) {
1978 if (lenp != 0)
1979 *lenp = pp->length;
1980 return pp->value;
1981 }
1982 return NULL;
1983}
1984EXPORT_SYMBOL(get_property);
1985
1986/*
1987 * Add a property to a node
1988 */
1989void prom_add_property(struct device_node* np, struct property* prop)
1990{
1991 struct property **next = &np->properties;
1992
1993 prop->next = NULL;
1994 while (*next)
1995 next = &(*next)->next;
1996 *next = prop;
1997}
1998
1999/* I quickly hacked that one, check against spec ! */
2000static inline unsigned long
2001bus_space_to_resource_flags(unsigned int bus_space)
2002{
2003 u8 space = (bus_space >> 24) & 0xf;
2004 if (space == 0)
2005 space = 0x02;
2006 if (space == 0x02)
2007 return IORESOURCE_MEM;
2008 else if (space == 0x01)
2009 return IORESOURCE_IO;
2010 else {
2011 printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n",
2012 bus_space);
2013 return 0;
2014 }
2015}
2016
2017#ifdef CONFIG_PCI
/*
 * Find which of @pdev's resources contains the OF address @range.
 * Returns NULL if no matching resource exists or if the range spills
 * past the end of the resource that contains its start.
 *
 * NOTE(review): the containment test uses "end > range->address"
 * (strict), so a range starting exactly at resource[i].end is
 * rejected -- confirm that is intentional.
 */
static struct resource *find_parent_pci_resource(struct pci_dev* pdev,
						 struct address_range *range)
{
	unsigned long mask;
	int i;

	/* Check this one */
	mask = bus_space_to_resource_flags(range->space);
	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
		if ((pdev->resource[i].flags & mask) == mask &&
		    pdev->resource[i].start <= range->address &&
		    pdev->resource[i].end > range->address) {
			if ((range->address + range->size - 1) > pdev->resource[i].end) {
				/* Add better message */
				printk(KERN_WARNING "PCI/OF resource overlap !\n");
				return NULL;
			}
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE)
		return NULL;
	return &pdev->resource[i];
}
2042
/*
 * Request an OF device resource. Currently handles child of PCI devices,
 * or other nodes attached to the root node. Ultimately, put some
 * link to resources in the OF node.
 *
 * On success returns the newly requested region (caller releases it
 * with release_OF_resource()); returns NULL on any failure.
 */
struct resource *request_OF_resource(struct device_node* node, int index,
				     const char* name_postfix)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;
	int nlen, plen;

	if (index >= node->n_addrs)
		goto fail;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		goto fail;

	/* Find a PCI parent if any: walk up the tree until some node
	 * maps to a PCI device. */
	nd = node;
	pcidev = NULL;
	while (nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "request_OF_resource(%s), parent not found\n",
		       node->name);
		goto fail;
	}

	res = __request_region(parent, node->addrs[index].address,
			       node->addrs[index].size, NULL);
	if (!res)
		goto fail;
	/* Name the region "<node name><postfix>"; if the allocation
	 * fails the region simply stays unnamed. */
	nlen = strlen(node->name);
	plen = name_postfix ? strlen(name_postfix) : 0;
	res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL);
	if (res->name) {
		strcpy((char *)res->name, node->name);
		if (plen)
			strcpy((char *)res->name+nlen, name_postfix);
	}
	return res;
fail:
	return NULL;
}
EXPORT_SYMBOL(request_OF_resource);
2105
/*
 * Release a region previously obtained with request_OF_resource().
 * Recomputes the owning parent resource the same way the request
 * path did, then searches the resource tree for a busy region with
 * exactly matching bounds.  Returns 0 on success or a negative errno.
 */
int release_OF_resource(struct device_node *node, int index)
{
	struct pci_dev* pcidev;
	u8 pci_bus, pci_devfn;
	unsigned long iomask, start, end;
	struct device_node* nd;
	struct resource* parent;
	struct resource *res = NULL;

	if (index >= node->n_addrs)
		return -EINVAL;

	/* Sanity check on bus space */
	iomask = bus_space_to_resource_flags(node->addrs[index].space);
	if (iomask & IORESOURCE_MEM)
		parent = &iomem_resource;
	else if (iomask & IORESOURCE_IO)
		parent = &ioport_resource;
	else
		return -EINVAL;

	/* Find a PCI parent if any (same walk as request_OF_resource) */
	nd = node;
	pcidev = NULL;
	while(nd) {
		if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn))
			pcidev = pci_find_slot(pci_bus, pci_devfn);
		if (pcidev) break;
		nd = nd->parent;
	}
	if (pcidev)
		parent = find_parent_pci_resource(pcidev, &node->addrs[index]);
	if (!parent) {
		printk(KERN_WARNING "release_OF_resource(%s), parent not found\n",
		       node->name);
		return -ENODEV;
	}

	/* Find us in the parent and its childs: descend into any
	 * enclosing region, move sideways otherwise, stop on an
	 * exact busy match. */
	res = parent->child;
	start = node->addrs[index].address;
	end = start + node->addrs[index].size - 1;
	while (res) {
		if (res->start == start && res->end == end &&
		    (res->flags & IORESOURCE_BUSY))
			break;
		if (res->start <= start && res->end >= end)
			res = res->child;
		else
			res = res->sibling;
	}
	if (!res)
		return -ENODEV;

	/* Free the name we kmalloc'ed in request_OF_resource(). */
	if (res->name) {
		kfree(res->name);
		res->name = NULL;
	}
	release_resource(res);
	kfree(res);

	return 0;
}
EXPORT_SYMBOL(release_OF_resource);
2170#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
new file mode 100644
index 000000000000..9750b3cd8ecd
--- /dev/null
+++ b/arch/powerpc/kernel/prom_init.c
@@ -0,0 +1,2109 @@
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/spinlock.h>
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/proc_fs.h>
28#include <linux/stringify.h>
29#include <linux/delay.h>
30#include <linux/initrd.h>
31#include <linux/bitops.h>
32#include <asm/prom.h>
33#include <asm/rtas.h>
34#include <asm/page.h>
35#include <asm/processor.h>
36#include <asm/irq.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/system.h>
40#include <asm/mmu.h>
41#include <asm/pgtable.h>
42#include <asm/pci.h>
43#include <asm/iommu.h>
44#include <asm/btext.h>
45#include <asm/sections.h>
46#include <asm/machdep.h>
47
48#ifdef CONFIG_LOGO_LINUX_CLUT224
49#include <linux/linux_logo.h>
50extern const struct linux_logo logo_linux_clut224;
51#endif
52
53/*
54 * Properties whose value is longer than this get excluded from our
55 * copy of the device tree. This value does need to be big enough to
56 * ensure that we don't lose things like the interrupt-map property
57 * on a PCI-PCI bridge.
58 */
59#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
60
61/*
62 * Eventually bump that one up
63 */
64#define DEVTREE_CHUNK_SIZE 0x100000
65
66/*
67 * This is the size of the local memory reserve map that gets copied
68 * into the boot params passed to the kernel. That size is totally
69 * flexible as the kernel just reads the list until it encounters an
70 * entry with size 0, so it can be changed without breaking binary
71 * compatibility
72 */
73#define MEM_RESERVE_MAP_SIZE 8
74
75/*
76 * prom_init() is called very early on, before the kernel text
77 * and data have been mapped to KERNELBASE. At this point the code
78 * is running at whatever address it has been loaded at.
79 * On ppc32 we compile with -mrelocatable, which means that references
80 * to extern and static variables get relocated automatically.
81 * On ppc64 we have to relocate the references explicitly with
82 * RELOC. (Note that strings count as static variables.)
83 *
84 * Because OF may have mapped I/O devices into the area starting at
85 * KERNELBASE, particularly on CHRP machines, we can't safely call
86 * OF once the kernel has been mapped to KERNELBASE. Therefore all
87 * OF calls must be done within prom_init().
88 *
89 * ADDR is used in calls to call_prom. The 4th and following
90 * arguments to call_prom should be 32-bit values.
91 * On ppc64, 64 bit values are truncated to 32 bits (and
92 * fortunately don't get interpreted as two arguments).
93 */
94#ifdef CONFIG_PPC64
95#define RELOC(x) (*PTRRELOC(&(x)))
96#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
97#else
98#define RELOC(x) (x)
99#define ADDR(x) (u32) (x)
100#endif
101
102#define PROM_BUG() do { \
103 prom_printf("kernel BUG at %s line 0x%x!\n", \
104 RELOC(__FILE__), __LINE__); \
105 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
106} while (0)
107
108#ifdef DEBUG_PROM
109#define prom_debug(x...) prom_printf(x)
110#else
111#define prom_debug(x...)
112#endif
113
114#ifdef CONFIG_PPC32
115#define PLATFORM_POWERMAC _MACH_Pmac
116#define PLATFORM_CHRP _MACH_chrp
117#endif
118
119
/* Open Firmware client-interface cells are 32 bits wide. */
typedef u32 prom_arg_t;

/* Argument block handed to the OF entry point: service name address,
 * argument/return counts, then the argument cells followed by the
 * return slots (see call_prom()/call_prom_ret()). */
struct prom_args {
	u32 service;
	u32 nargs;
	u32 nret;
	prom_arg_t args[10];
};

/* Handles we keep while talking to Open Firmware. */
struct prom_t {
	ihandle root;
	ihandle chosen;
	int cpu;
	ihandle stdout;
	ihandle mmumap;		/* MMU package; used by prom_claim() to map on old pmacs */
};

/* One entry of the boot-time memory reserve map (see reserve_mem()). */
struct mem_map_entry {
	unsigned long base;
	unsigned long size;
};

/* A single 32-bit cell of a device-tree property such as "reg". */
typedef u32 cell_t;
143
144extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
145
146#ifdef CONFIG_PPC64
147extern int enter_prom(struct prom_args *args, unsigned long entry);
148#else
149static inline int enter_prom(struct prom_args *args, unsigned long entry)
150{
151 return ((int (*)(struct prom_args *))entry)(args);
152}
153#endif
154
155extern void copy_and_flush(unsigned long dest, unsigned long src,
156 unsigned long size, unsigned long offset);
157
158/* prom structure */
159static struct prom_t __initdata prom;
160
161static unsigned long prom_entry __initdata;
162
163#define PROM_SCRATCH_SIZE 256
164
165static char __initdata of_stdout_device[256];
166static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
167
168static unsigned long __initdata dt_header_start;
169static unsigned long __initdata dt_struct_start, dt_struct_end;
170static unsigned long __initdata dt_string_start, dt_string_end;
171
172static unsigned long __initdata prom_initrd_start, prom_initrd_end;
173
174#ifdef CONFIG_PPC64
175static int __initdata iommu_force_on;
176static int __initdata ppc64_iommu_off;
177static unsigned long __initdata prom_tce_alloc_start;
178static unsigned long __initdata prom_tce_alloc_end;
179#endif
180
181static int __initdata of_platform;
182
183static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
184
185static unsigned long __initdata prom_memory_limit;
186
187static unsigned long __initdata alloc_top;
188static unsigned long __initdata alloc_top_high;
189static unsigned long __initdata alloc_bottom;
190static unsigned long __initdata rmo_top;
191static unsigned long __initdata ram_top;
192
193static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
194static int __initdata mem_reserve_cnt;
195
196static cell_t __initdata regbuf[1024];
197
198
199#define MAX_CPU_THREADS 2
200
201/* TO GO */
202#ifdef CONFIG_HMT
203struct {
204 unsigned int pir;
205 unsigned int threadid;
206} hmt_thread_data[NR_CPUS];
207#endif /* CONFIG_HMT */
208
209/*
210 * Error results ... some OF calls will return "-1" on error, some
211 * will return 0, some will return either. To simplify, here are
212 * macros to use with any ihandle or phandle return value to check if
213 * it is valid
214 */
215
216#define PROM_ERROR (-1u)
217#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
218#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
219
220
221/* This is the one and *ONLY* place where we actually call open
222 * firmware.
223 */
224
225static int __init call_prom(const char *service, int nargs, int nret, ...)
226{
227 int i;
228 struct prom_args args;
229 va_list list;
230
231 args.service = ADDR(service);
232 args.nargs = nargs;
233 args.nret = nret;
234
235 va_start(list, nret);
236 for (i = 0; i < nargs; i++)
237 args.args[i] = va_arg(list, prom_arg_t);
238 va_end(list);
239
240 for (i = 0; i < nret; i++)
241 args.args[nargs+i] = 0;
242
243 if (enter_prom(&args, RELOC(prom_entry)) < 0)
244 return PROM_ERROR;
245
246 return (nret > 0) ? args.args[nargs] : 0;
247}
248
249static int __init call_prom_ret(const char *service, int nargs, int nret,
250 prom_arg_t *rets, ...)
251{
252 int i;
253 struct prom_args args;
254 va_list list;
255
256 args.service = ADDR(service);
257 args.nargs = nargs;
258 args.nret = nret;
259
260 va_start(list, rets);
261 for (i = 0; i < nargs; i++)
262 args.args[i] = va_arg(list, prom_arg_t);
263 va_end(list);
264
265 for (i = 0; i < nret; i++)
266 rets[nargs+i] = 0;
267
268 if (enter_prom(&args, RELOC(prom_entry)) < 0)
269 return PROM_ERROR;
270
271 if (rets != NULL)
272 for (i = 1; i < nret; ++i)
273 rets[i-1] = args.args[nargs+i];
274
275 return (nret > 0) ? args.args[nargs] : 0;
276}
277
278
279static void __init prom_print(const char *msg)
280{
281 const char *p, *q;
282 struct prom_t *_prom = &RELOC(prom);
283
284 if (_prom->stdout == 0)
285 return;
286
287 for (p = msg; *p != 0; p = q) {
288 for (q = p; *q != 0 && *q != '\n'; ++q)
289 ;
290 if (q > p)
291 call_prom("write", 3, 1, _prom->stdout, p, q - p);
292 if (*q == 0)
293 break;
294 ++q;
295 call_prom("write", 3, 1, _prom->stdout, ADDR("\r\n"), 2);
296 }
297}
298
299
300static void __init prom_print_hex(unsigned long val)
301{
302 int i, nibbles = sizeof(val)*2;
303 char buf[sizeof(val)*2+1];
304 struct prom_t *_prom = &RELOC(prom);
305
306 for (i = nibbles-1; i >= 0; i--) {
307 buf[i] = (val & 0xf) + '0';
308 if (buf[i] > '9')
309 buf[i] += ('a'-'0'-10);
310 val >>= 4;
311 }
312 buf[nibbles] = '\0';
313 call_prom("write", 3, 1, _prom->stdout, buf, nibbles);
314}
315
316
/*
 * Minimal printf for the OF console: understands only %s and %x;
 * '\n' is expanded to CRLF.  Any other character after '%' is
 * silently skipped.
 *
 * NOTE(review): va_end() is never called -- technically required by
 * the C standard, confirm it is harmless on this ABI.
 */
static void __init prom_printf(const char *format, ...)
{
	const char *p, *q, *s;
	va_list args;
	unsigned long v;
	struct prom_t *_prom = &RELOC(prom);

	va_start(args, format);
#ifdef CONFIG_PPC64
	/* format is a static string; relocate its address */
	format = PTRRELOC(format);
#endif
	for (p = format; *p != 0; p = q) {
		/* emit the literal run up to the next '\n' or '%' */
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, _prom->stdout, p, q - p);
		if (*q == 0)
			break;
		if (*q == '\n') {
			++q;
			call_prom("write", 3, 1, _prom->stdout,
				  ADDR("\r\n"), 2);
			continue;
		}
		++q;
		if (*q == 0)
			break;
		switch (*q) {
		case 's':
			++q;
			s = va_arg(args, const char *);
			prom_print(s);
			break;
		case 'x':
			++q;
			v = va_arg(args, unsigned long);
			prom_print_hex(v);
			break;
		}
	}
}
358
359
/*
 * Claim a physical memory range from Open Firmware.  Returns the
 * claimed address, or -1 (PROM_ERROR) on failure.  If the MMU
 * package handle is known, additionally invoke its "map" method,
 * passing @virt for both addresses (apparently an identity map --
 * confirm against the OF MMU package binding).
 */
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
				      unsigned long align)
{
	int ret;
	struct prom_t *_prom = &RELOC(prom);

	ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
			(prom_arg_t)align);
	if (ret != -1 && _prom->mmumap != 0)
		/* old pmacs need us to map as well */
		call_prom("call-method", 6, 1,
			  ADDR("map"), _prom->mmumap, 0, size, virt, virt);
	return ret;
}
374
/*
 * Print @reason on the OF console and exit to firmware.
 * Never returns; the trailing loop guards against a broken "exit".
 */
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
#ifdef CONFIG_PPC64
	/* reason is a static string; relocate its address */
	reason = PTRRELOC(reason);
#endif
	prom_print(reason);
	/* ToDo: should put up an SRC here on p/iSeries */
	call_prom("exit", 0, 0);

	for (;;) /* should never get here */
		;
}
387
388
/*
 * Depth-first traversal step over the OF device tree: try the
 * first child, then the next peer, then climb parents looking for
 * a peer.  Updates *nodep in place; returns 1 while nodes remain,
 * 0 when the whole tree has been visited.
 */
static int __init prom_next_node(phandle *nodep)
{
	phandle node;

	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
		return 1;
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
		return 1;
	for (;;) {
		if ((node = call_prom("parent", 1, 1, node)) == 0)
			return 0;
		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
			return 1;
	}
}
405
/* Fetch property @pname of @node into @value (at most @valuelen
 * bytes).  The buffer pointer is truncated to a 32-bit cell for the
 * OF interface.  Returns the property length, or -1 on error. */
static int __init prom_getprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}
412
/* Return the length of property @pname of @node, or -1 on error. */
static int __init prom_getproplen(phandle node, const char *pname)
{
	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
417
/* Set property @pname of @node to @valuelen bytes from @value.
 * Returns the actual size written, or -1 on error. */
static int __init prom_setprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	return call_prom("setprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}
424
/* We can't use the standard versions because of RELOC headaches. */
/* NOTE: these macros evaluate their argument several times -- do not
 * pass expressions with side effects. */
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))

#define isdigit(c)	('0' <= (c) && (c) <= '9')
#define islower(c)	('a' <= (c) && (c) <= 'z')
#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
433
/*
 * Convert a numeric string to unsigned long, auto-detecting the
 * base from the prefix: "0x"/"0X" is hex, a leading "0" is octal,
 * otherwise decimal.  If @endp is non-NULL it receives a pointer
 * to the first unconsumed character.
 */
unsigned long prom_strtoul(const char *cp, const char **endp)
{
	unsigned long base = 10, val = 0;

	if (*cp == '0') {
		cp++;
		base = 8;
		if (toupper(*cp) == 'X') {
			cp++;
			base = 16;
		}
	}

	while (isxdigit(*cp)) {
		unsigned long digit;

		digit = isdigit(*cp) ? (unsigned long)(*cp - '0')
				     : (unsigned long)(toupper(*cp) - 'A' + 10);
		if (digit >= base)
			break;
		val = val * base + digit;
		cp++;
	}

	if (endp)
		*endp = cp;

	return val;
}
458
/*
 * Parse a size string such as "512M": a number (any base accepted
 * by prom_strtoul) optionally followed by a K/M/G suffix, upper or
 * lower case.  *retptr is advanced past everything consumed.
 */
unsigned long prom_memparse(const char *ptr, const char **retptr)
{
	unsigned long value = prom_strtoul(ptr, retptr);
	char suffix = **retptr;
	int shift = 0;

	/*
	 * Deliberately a chain of ifs, not a switch: GCC *may*
	 * compile a switch into a jump table, which won't work
	 * because we're not running at our link address.
	 */
	if (suffix == 'G' || suffix == 'g')
		shift = 30;
	if (suffix == 'M' || suffix == 'm')
		shift = 20;
	if (suffix == 'K' || suffix == 'k')
		shift = 10;

	if (shift) {
		value <<= shift;
		(*retptr)++;
	}

	return value;
}
485
/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
 */
static void __init early_cmdline_parse(void)
{
	struct prom_t *_prom = &RELOC(prom);
	char *opt, *p;
	int l = 0;

	/* Fetch "bootargs" from /chosen; fall back to CONFIG_CMDLINE
	 * if it is empty or absent. */
	RELOC(prom_cmd_line[0]) = 0;
	p = RELOC(prom_cmd_line);
	if ((long)_prom->chosen > 0)
		l = prom_getprop(_prom->chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
	if (l == 0) /* dbl check */
		strlcpy(RELOC(prom_cmd_line),
			RELOC(CONFIG_CMDLINE), sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
	prom_printf("command line: %s\n", RELOC(prom_cmd_line));

#ifdef CONFIG_PPC64
	/* "iommu=off" / "iommu=force" */
	opt = strstr(RELOC(prom_cmd_line), RELOC("iommu="));
	if (opt) {
		prom_printf("iommu opt is: %s\n", opt);
		opt += 6;
		while (*opt && *opt == ' ')
			opt++;
		if (!strncmp(opt, RELOC("off"), 3))
			RELOC(ppc64_iommu_off) = 1;
		else if (!strncmp(opt, RELOC("force"), 5))
			RELOC(iommu_force_on) = 1;
	}
#endif

	/* "mem=<size>" memory limit */
	opt = strstr(RELOC(prom_cmd_line), RELOC("mem="));
	if (opt) {
		opt += 4;
		RELOC(prom_memory_limit) = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
		/* Align to 16 MB == size of ppc64 large page */
		RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000);
#endif
	}
}
531
532#ifdef CONFIG_PPC_PSERIES
/*
 * To tell the firmware what our capabilities are, we have to pass
 * it a fake 32-bit ELF header containing a couple of PT_NOTE sections
 * that contain structures that contain the actual values.
 */
static struct fake_elf {
	Elf32_Ehdr elfhdr;
	Elf32_Phdr phdr[2];
	/* CHRP boot note ("PowerPC" note, type 0x1275) */
	struct chrpnote {
		u32 namesz;
		u32 descsz;
		u32 type;
		char name[8]; /* "PowerPC" */
		struct chrpdesc {
			u32 real_mode;
			u32 real_base;
			u32 real_size;
			u32 virt_base;
			u32 virt_size;
			u32 load_base;
		} chrpdesc;
	} chrpnote;
	/* RPA client-config note for pSeries firmware */
	struct rpanote {
		u32 namesz;
		u32 descsz;
		u32 type;
		char name[24]; /* "IBM,RPA-Client-Config" */
		struct rpadesc {
			u32 lpar_affinity;
			u32 min_rmo_size;
			u32 min_rmo_percent;
			u32 max_pft_size;
			u32 splpar;
			u32 min_load;
			u32 new_mem_def;
			u32 ignore_me;
		} rpadesc;
	} rpanote;
} fake_elf = {
	.elfhdr = {
		.e_ident = { 0x7f, 'E', 'L', 'F',
			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
		.e_type = ET_EXEC,	/* yeah right */
		.e_machine = EM_PPC,
		.e_version = EV_CURRENT,
		.e_phoff = offsetof(struct fake_elf, phdr),
		.e_phentsize = sizeof(Elf32_Phdr),
		.e_phnum = 2
	},
	.phdr = {
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
			.min_rmo_percent = 0,
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
			.splpar = 1,
			.min_load = ~0U,
			.new_mem_def = 0
		}
	}
};
623
624static void __init prom_send_capabilities(void)
625{
626 ihandle elfloader;
627
628 elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
629 if (elfloader == 0) {
630 prom_printf("couldn't open /packages/elf-loader\n");
631 return;
632 }
633 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
634 elfloader, ADDR(&fake_elf));
635 call_prom("close", 1, 0, elfloader);
636}
637#endif
638
639/*
640 * Memory allocation strategy... our layout is normally:
641 *
642 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
643 * rare cases, initrd might end up being before the kernel though.
644 * We assume this won't override the final kernel at 0, we have no
645 * provision to handle that in this version, but it should hopefully
646 * never happen.
647 *
648 * alloc_top is set to the top of RMO, eventually shrink down if the
649 * TCEs overlap
650 *
651 * alloc_bottom is set to the top of kernel/initrd
652 *
653 * from there, allocations are done this way : rtas is allocated
654 * topmost, and the device-tree is allocated from the bottom. We try
655 * to grow the device-tree allocation as we progress. If we can't,
656 * then we fail, we don't currently have a facility to restart
657 * elsewhere, but that shouldn't be necessary.
658 *
659 * Note that calls to reserve_mem have to be done explicitly, memory
660 * allocated with either alloc_up or alloc_down isn't automatically
661 * reserved.
662 */
663
664
665/*
666 * Allocates memory in the RMO upward from the kernel/initrd
667 *
668 * When align is 0, this is a special case, it means to allocate in place
669 * at the current location of alloc_bottom or fail (that is basically
670 * extending the previous allocation). Used for the device-tree flattening
671 */
672static unsigned long __init alloc_up(unsigned long size, unsigned long align)
673{
674 unsigned long base = RELOC(alloc_bottom);
675 unsigned long addr = 0;
676
677 if (align)
678 base = _ALIGN_UP(base, align);
679 prom_debug("alloc_up(%x, %x)\n", size, align);
680 if (RELOC(ram_top) == 0)
681 prom_panic("alloc_up() called with mem not initialized\n");
682
683 if (align)
684 base = _ALIGN_UP(RELOC(alloc_bottom), align);
685 else
686 base = RELOC(alloc_bottom);
687
688 for(; (base + size) <= RELOC(alloc_top);
689 base = _ALIGN_UP(base + 0x100000, align)) {
690 prom_debug(" trying: 0x%x\n\r", base);
691 addr = (unsigned long)prom_claim(base, size, 0);
692 if (addr != PROM_ERROR && addr != 0)
693 break;
694 addr = 0;
695 if (align == 0)
696 break;
697 }
698 if (addr == 0)
699 return 0;
700 RELOC(alloc_bottom) = addr;
701
702 prom_debug(" -> %x\n", addr);
703 prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom));
704 prom_debug(" alloc_top : %x\n", RELOC(alloc_top));
705 prom_debug(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
706 prom_debug(" rmo_top : %x\n", RELOC(rmo_top));
707 prom_debug(" ram_top : %x\n", RELOC(ram_top));
708
709 return addr;
710}
711
/*
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM.  Note that this one doesn't handle
 * failures.  It does claim memory if highmem is not set.
 */
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       int highmem)
{
	unsigned long base, addr = 0;

	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
		   highmem ? RELOC("(high)") : RELOC("(low)"));
	if (RELOC(ram_top) == 0)
		prom_panic("alloc_down() called with mem not initialized\n");

	if (highmem) {
		/* Carve out storage for the TCE table. */
		addr = _ALIGN_DOWN(RELOC(alloc_top_high) - size, align);
		if (addr <= RELOC(alloc_bottom))
			return 0;
		/* Will we bump into the RMO ? If yes, check out that we
		 * didn't overlap existing allocations there, if we did,
		 * we are dead, we must be the first in town !
		 */
		if (addr < RELOC(rmo_top)) {
			/* Good, we are first */
			if (RELOC(alloc_top) == RELOC(rmo_top))
				RELOC(alloc_top) = RELOC(rmo_top) = addr;
			else
				return 0;
		}
		RELOC(alloc_top_high) = addr;
		goto bail;
	}

	/* Low path: claim from alloc_top downward, stepping 1MB at a
	 * time until a claim succeeds or we hit alloc_bottom. */
	base = _ALIGN_DOWN(RELOC(alloc_top) - size, align);
	for (; base > RELOC(alloc_bottom);
	     base = _ALIGN_DOWN(base - 0x100000, align))  {
		prom_debug("    trying: 0x%x\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
	}
	if (addr == 0)
		return 0;
	RELOC(alloc_top) = addr;

 bail:
	prom_debug(" -> %x\n", addr);
	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
	prom_debug("  alloc_top    : %x\n", RELOC(alloc_top));
	prom_debug("  alloc_top_hi : %x\n", RELOC(alloc_top_high));
	prom_debug("  rmo_top      : %x\n", RELOC(rmo_top));
	prom_debug("  ram_top      : %x\n", RELOC(ram_top));

	return addr;
}
770
/*
 * Parse a "reg" cell
 *
 * Reads @s 32-bit cells from *cellp into an unsigned long and
 * advances *cellp past the value.  Cells that don't fit in an
 * unsigned long are skipped; only the least-significant one
 * (ppc32) or two (ppc64) cells contribute to the result.
 */
static unsigned long __init prom_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;
	unsigned long r = 0;

	/* Ignore more than 2 cells */
	while (s > sizeof(unsigned long) / 4) {
		p++;
		s--;
	}
	r = *p++;
#ifdef CONFIG_PPC64
	/* two remaining cells: combine into a 64-bit value */
	if (s > 1) {
		r <<= 32;
		r |= *(p++);
	}
#endif
	*cellp = p;
	return r;
}
794
795/*
796 * Very dumb function for adding to the memory reserve list, but
797 * we don't need anything smarter at this point
798 *
799 * XXX Eventually check for collisions. They should NEVER happen.
800 * If problems seem to show up, it would be a good start to track
801 * them down.
802 */
803static void reserve_mem(unsigned long base, unsigned long size)
804{
805 unsigned long top = base + size;
806 unsigned long cnt = RELOC(mem_reserve_cnt);
807
808 if (size == 0)
809 return;
810
811 /* We need to always keep one empty entry so that we
812 * have our terminator with "size" set to 0 since we are
813 * dumb and just copy this entire array to the boot params
814 */
815 base = _ALIGN_DOWN(base, PAGE_SIZE);
816 top = _ALIGN_UP(top, PAGE_SIZE);
817 size = top - base;
818
819 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
820 prom_panic("Memory reserve map exhausted !\n");
821 RELOC(mem_reserve_map)[cnt].base = base;
822 RELOC(mem_reserve_map)[cnt].size = size;
823 RELOC(mem_reserve_cnt) = cnt + 1;
824}
825
826/*
827 * Initialize memory allocation mecanism, parse "memory" nodes and
828 * obtain that way the top of memory and RMO to setup out local allocator
829 */
/*
 * Scan the device tree "memory" nodes to establish rmo_top (top of the
 * Real Mode Area, taken from the region starting at physical 0) and
 * ram_top (highest physical address seen), then derive the allocator
 * bounds alloc_bottom / alloc_top / alloc_top_high from them.
 */
static void __init prom_init_mem(void)
{
	phandle node;
	char *path, type[64];
	unsigned int plen;
	cell_t *p, *endp;
	struct prom_t *_prom = &RELOC(prom);
	u32 rac, rsc;

	/*
	 * We iterate the memory nodes to find
	 * 1) top of RMO (first node)
	 * 2) top of memory
	 */
	/* Defaults below apply when the root node lacks the properties */
	rac = 2;
	prom_getprop(_prom->root, "#address-cells", &rac, sizeof(rac));
	rsc = 1;
	prom_getprop(_prom->root, "#size-cells", &rsc, sizeof(rsc));
	prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
	prom_debug("root_size_cells: %x\n", (unsigned long) rsc);

	prom_debug("scanning memory:\n");
	path = RELOC(prom_scratch);

	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));

		if (type[0] == 0) {
			/*
			 * CHRP Longtrail machines have no device_type
			 * on the memory node, so check the name instead...
			 */
			prom_getprop(node, "name", type, sizeof(type));
		}
		if (strcmp(type, RELOC("memory")))
			continue;

		/* "reg" is a list of (base, size) cell tuples */
		plen = prom_getprop(node, "reg", RELOC(regbuf), sizeof(regbuf));
		if (plen > sizeof(regbuf)) {
			prom_printf("memory node too large for buffer !\n");
			plen = sizeof(regbuf);
		}
		p = RELOC(regbuf);
		endp = p + (plen / sizeof(cell_t));

#ifdef DEBUG_PROM
		memset(path, 0, PROM_SCRATCH_SIZE);
		call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
		prom_debug(" node %s :\n", path);
#endif /* DEBUG_PROM */

		/* Walk every (base, size) pair in the reg property */
		while ((endp - p) >= (rac + rsc)) {
			unsigned long base, size;

			base = prom_next_cell(rac, &p);
			size = prom_next_cell(rsc, &p);

			if (size == 0)
				continue;
			prom_debug(" %x %x\n", base, size);
			/* The region at physical 0 defines the RMO size */
			if (base == 0)
				RELOC(rmo_top) = size;
			if ((base + size) > RELOC(ram_top))
				RELOC(ram_top) = base + size;
		}
	}

	/* Start allocating just past the kernel image (plus slack) */
	RELOC(alloc_bottom) = PAGE_ALIGN((unsigned long)&RELOC(_end) + 0x4000);

	/* Check if we have an initrd after the kernel, if we do move our bottom
	 * point to after it
	 */
	if (RELOC(prom_initrd_start)) {
		if (RELOC(prom_initrd_end) > RELOC(alloc_bottom))
			RELOC(alloc_bottom) = PAGE_ALIGN(RELOC(prom_initrd_end));
	}

	/*
	 * If prom_memory_limit is set we reduce the upper limits *except* for
	 * alloc_top_high. This must be the real top of RAM so we can put
	 * TCE's up there.
	 */

	RELOC(alloc_top_high) = RELOC(ram_top);

	if (RELOC(prom_memory_limit)) {
		/* A mem= limit below alloc_bottom or above ram_top is bogus;
		 * clear it rather than honouring it.
		 */
		if (RELOC(prom_memory_limit) <= RELOC(alloc_bottom)) {
			prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
				RELOC(prom_memory_limit));
			RELOC(prom_memory_limit) = 0;
		} else if (RELOC(prom_memory_limit) >= RELOC(ram_top)) {
			prom_printf("Ignoring mem=%x >= ram_top.\n",
				RELOC(prom_memory_limit));
			RELOC(prom_memory_limit) = 0;
		} else {
			RELOC(ram_top) = RELOC(prom_memory_limit);
			RELOC(rmo_top) = min(RELOC(rmo_top), RELOC(prom_memory_limit));
		}
	}

	/*
	 * Setup our top alloc point, that is top of RMO or top of
	 * segment 0 when running non-LPAR.
	 * Some RS64 machines have buggy firmware where claims up at
	 * 1GB fail. Cap at 768MB as a workaround.
	 * Since 768MB is plenty of room, and we need to cap to something
	 * reasonable on 32-bit, cap at 768MB on all machines.
	 */
	if (!RELOC(rmo_top))
		RELOC(rmo_top) = RELOC(ram_top);
	RELOC(rmo_top) = min(0x30000000ul, RELOC(rmo_top));
	RELOC(alloc_top) = RELOC(rmo_top);

	prom_printf("memory layout at init:\n");
	prom_printf(" memory_limit : %x (16 MB aligned)\n", RELOC(prom_memory_limit));
	prom_printf(" alloc_bottom : %x\n", RELOC(alloc_bottom));
	prom_printf(" alloc_top : %x\n", RELOC(alloc_top));
	prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high));
	prom_printf(" rmo_top : %x\n", RELOC(rmo_top));
	prom_printf(" ram_top : %x\n", RELOC(ram_top));
}
952
953
954/*
955 * Allocate room for and instantiate RTAS
956 */
/*
 * Allocate a block of memory for RTAS, ask the firmware to instantiate
 * it there, reserve the region, and record base/entry in the /rtas node
 * for the kernel proper to pick up later. Silently returns if the
 * machine has no /rtas node or a zero rtas-size.
 */
static void __init prom_instantiate_rtas(void)
{
	phandle rtas_node;
	ihandle rtas_inst;
	u32 base, entry = 0;
	u32 size = 0;

	prom_debug("prom_instantiate_rtas: start...\n");

	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	prom_debug("rtas_node: %x\n", rtas_node);
	if (!PHANDLE_VALID(rtas_node))
		return;

	/* Firmware tells us how much memory RTAS needs */
	prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
	if (size == 0)
		return;

	/* Allocate high (from the top down) so RTAS stays out of the way */
	base = alloc_down(size, PAGE_SIZE, 0);
	if (base == 0) {
		prom_printf("RTAS allocation failed !\n");
		return;
	}

	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
	if (!IHANDLE_VALID(rtas_inst)) {
		prom_printf("opening rtas package failed");
		return;
	}

	prom_printf("instantiating rtas at 0x%x ...", base);

	/* "instantiate-rtas" returns the RTAS entry point address */
	if (call_prom_ret("call-method", 3, 2, &entry,
			  ADDR("instantiate-rtas"),
			  rtas_inst, base) == PROM_ERROR
	    || entry == 0) {
		prom_printf(" failed\n");
		return;
	}
	prom_printf(" done\n");

	/* Keep the kernel from reusing the RTAS region */
	reserve_mem(base, size);

	prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
	prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));

	prom_debug("rtas base = 0x%x\n", base);
	prom_debug("rtas entry = 0x%x\n", entry);
	prom_debug("rtas size = 0x%x\n", (long)size);

	prom_debug("prom_instantiate_rtas: end...\n");
}
1009
1010#ifdef CONFIG_PPC64
1011/*
1012 * Allocate room for and initialize TCE tables
1013 */
1014static void __init prom_initialize_tce_table(void)
1015{
1016 phandle node;
1017 ihandle phb_node;
1018 char compatible[64], type[64], model[64];
1019 char *path = RELOC(prom_scratch);
1020 u64 base, align;
1021 u32 minalign, minsize;
1022 u64 tce_entry, *tce_entryp;
1023 u64 local_alloc_top, local_alloc_bottom;
1024 u64 i;
1025
1026 if (RELOC(ppc64_iommu_off))
1027 return;
1028
1029 prom_debug("starting prom_initialize_tce_table\n");
1030
1031 /* Cache current top of allocs so we reserve a single block */
1032 local_alloc_top = RELOC(alloc_top_high);
1033 local_alloc_bottom = local_alloc_top;
1034
1035 /* Search all nodes looking for PHBs. */
1036 for (node = 0; prom_next_node(&node); ) {
1037 compatible[0] = 0;
1038 type[0] = 0;
1039 model[0] = 0;
1040 prom_getprop(node, "compatible",
1041 compatible, sizeof(compatible));
1042 prom_getprop(node, "device_type", type, sizeof(type));
1043 prom_getprop(node, "model", model, sizeof(model));
1044
1045 if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL))
1046 continue;
1047
1048 /* Keep the old logic in tack to avoid regression. */
1049 if (compatible[0] != 0) {
1050 if ((strstr(compatible, RELOC("python")) == NULL) &&
1051 (strstr(compatible, RELOC("Speedwagon")) == NULL) &&
1052 (strstr(compatible, RELOC("Winnipeg")) == NULL))
1053 continue;
1054 } else if (model[0] != 0) {
1055 if ((strstr(model, RELOC("ython")) == NULL) &&
1056 (strstr(model, RELOC("peedwagon")) == NULL) &&
1057 (strstr(model, RELOC("innipeg")) == NULL))
1058 continue;
1059 }
1060
1061 if (prom_getprop(node, "tce-table-minalign", &minalign,
1062 sizeof(minalign)) == PROM_ERROR)
1063 minalign = 0;
1064 if (prom_getprop(node, "tce-table-minsize", &minsize,
1065 sizeof(minsize)) == PROM_ERROR)
1066 minsize = 4UL << 20;
1067
1068 /*
1069 * Even though we read what OF wants, we just set the table
1070 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1071 * By doing this, we avoid the pitfalls of trying to DMA to
1072 * MMIO space and the DMA alias hole.
1073 *
1074 * On POWER4, firmware sets the TCE region by assuming
1075 * each TCE table is 8MB. Using this memory for anything
1076 * else will impact performance, so we always allocate 8MB.
1077 * Anton
1078 */
1079 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
1080 minsize = 8UL << 20;
1081 else
1082 minsize = 4UL << 20;
1083
1084 /* Align to the greater of the align or size */
1085 align = max(minalign, minsize);
1086 base = alloc_down(minsize, align, 1);
1087 if (base == 0)
1088 prom_panic("ERROR, cannot find space for TCE table.\n");
1089 if (base < local_alloc_bottom)
1090 local_alloc_bottom = base;
1091
1092 /* Save away the TCE table attributes for later use. */
1093 prom_setprop(node, "linux,tce-base", &base, sizeof(base));
1094 prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
1095
1096 /* It seems OF doesn't null-terminate the path :-( */
1097 memset(path, 0, sizeof(path));
1098 /* Call OF to setup the TCE hardware */
1099 if (call_prom("package-to-path", 3, 1, node,
1100 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1101 prom_printf("package-to-path failed\n");
1102 }
1103
1104 prom_debug("TCE table: %s\n", path);
1105 prom_debug("\tnode = 0x%x\n", node);
1106 prom_debug("\tbase = 0x%x\n", base);
1107 prom_debug("\tsize = 0x%x\n", minsize);
1108
1109 /* Initialize the table to have a one-to-one mapping
1110 * over the allocated size.
1111 */
1112 tce_entryp = (unsigned long *)base;
1113 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1114 tce_entry = (i << PAGE_SHIFT);
1115 tce_entry |= 0x3;
1116 *tce_entryp = tce_entry;
1117 }
1118
1119 prom_printf("opening PHB %s", path);
1120 phb_node = call_prom("open", 1, 1, path);
1121 if (phb_node == 0)
1122 prom_printf("... failed\n");
1123 else
1124 prom_printf("... done\n");
1125
1126 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1127 phb_node, -1, minsize,
1128 (u32) base, (u32) (base >> 32));
1129 call_prom("close", 1, 0, phb_node);
1130 }
1131
1132 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1133
1134 if (RELOC(prom_memory_limit)) {
1135 /*
1136 * We align the start to a 16MB boundary so we can map
1137 * the TCE area using large pages if possible.
1138 * The end should be the top of RAM so no need to align it.
1139 */
1140 RELOC(prom_tce_alloc_start) = _ALIGN_DOWN(local_alloc_bottom,
1141 0x1000000);
1142 RELOC(prom_tce_alloc_end) = local_alloc_top;
1143 }
1144
1145 /* Flag the first invalid entry */
1146 prom_debug("ending prom_initialize_tce_table\n");
1147}
1148#endif
1149
1150/*
1151 * With CHRP SMP we need to use the OF to start the other processors.
1152 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1153 * so we have to put the processors into a holding pattern controlled
1154 * by the kernel (not OF) before we destroy the OF.
1155 *
1156 * This uses a chunk of low memory, puts some holding pattern
1157 * code there and sends the other processors off to there until
1158 * smp_boot_cpus tells them to do something. The holding pattern
1159 * checks that address until its cpu # is there, when it is that
1160 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1161 * of setting those values.
1162 *
1163 * We also use physical address 0x4 here to tell when a cpu
1164 * is in its holding pattern code.
1165 *
1166 * -- Cort
1167 */
1168extern void __secondary_hold(void);
1169extern unsigned long __secondary_hold_spinloop;
1170extern unsigned long __secondary_hold_acknowledge;
1171
1172/*
1173 * We want to reference the copy of __secondary_hold_* in the
1174 * 0 - 0x100 address range
1175 */
1176#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1177
/*
 * Start every secondary CPU via the firmware "start-cpu" method and park
 * it in __secondary_hold's spinloop, so the CPUs survive our later
 * takeover of the machine (OF is trashed once we move the kernel).
 * Each started CPU writes its hw index into *acknowledge as a handshake.
 */
static void __init prom_hold_cpus(void)
{
	unsigned long i;
	unsigned int reg;
	phandle node;
	char type[64];
	int cpuid = 0;
	unsigned int interrupt_server[MAX_CPU_THREADS];
	unsigned int cpu_threads, hw_cpu_num;
	int propsize;
	struct prom_t *_prom = &RELOC(prom);
	unsigned long *spinloop
		= (void *) LOW_ADDR(__secondary_hold_spinloop);
	unsigned long *acknowledge
		= (void *) LOW_ADDR(__secondary_hold_acknowledge);
#ifdef CONFIG_PPC64
	/* __secondary_hold is actually a descriptor, not the text address */
	unsigned long secondary_hold
		= __pa(*PTRRELOC((unsigned long *)__secondary_hold));
#else
	unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
#endif

	prom_debug("prom_hold_cpus: start...\n");
	prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
	prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
	prom_debug(" 1) acknowledge = 0x%x\n",
		   (unsigned long)acknowledge);
	prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
	prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);

	/* Set the common spinloop variable, so all of the secondary cpus
	 * will block when they are awakened from their OF spinloop.
	 * This must occur for both SMP and non SMP kernels, since OF will
	 * be trashed when we move the kernel.
	 */
	*spinloop = 0;

#ifdef CONFIG_HMT
	/* Poison the per-cpu PIR slots so valid ones are recognizable */
	for (i = 0; i < NR_CPUS; i++)
		RELOC(hmt_thread_data)[i].pir = 0xdeadbeef;
#endif
	/* look for cpus */
	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));
		if (strcmp(type, RELOC("cpu")) != 0)
			continue;

		/* Skip non-configured cpus. */
		if (prom_getprop(node, "status", type, sizeof(type)) > 0)
			if (strcmp(type, RELOC("okay")) != 0)
				continue;

		/* "reg" holds the hardware CPU index for this node */
		reg = -1;
		prom_getprop(node, "reg", &reg, sizeof(reg));

		prom_debug("\ncpuid = 0x%x\n", cpuid);
		prom_debug("cpu hw idx = 0x%x\n", reg);

		/* Init the acknowledge var which will be reset by
		 * the secondary cpu when it awakens from its OF
		 * spinloop.
		 */
		*acknowledge = (unsigned long)-1;

		propsize = prom_getprop(node, "ibm,ppc-interrupt-server#s",
					&interrupt_server,
					sizeof(interrupt_server));
		if (propsize < 0) {
			/* no property. old hardware has no SMT */
			cpu_threads = 1;
			interrupt_server[0] = reg; /* fake it with phys id */
		} else {
			/* We have a threaded processor */
			cpu_threads = propsize / sizeof(u32);
			if (cpu_threads > MAX_CPU_THREADS) {
				prom_printf("SMT: too many threads!\n"
					    "SMT: found %x, max is %x\n",
					    cpu_threads, MAX_CPU_THREADS);
				cpu_threads = 1; /* ToDo: panic? */
			}
		}

		hw_cpu_num = interrupt_server[0];
		if (hw_cpu_num != _prom->cpu) {
			/* Primary Thread of non-boot cpu */
			prom_printf("%x : starting cpu hw idx %x... ", cpuid, reg);
			call_prom("start-cpu", 3, 0, node,
				  secondary_hold, reg);

			/* Busy-wait (bounded) for the CPU to check in */
			for (i = 0; (i < 100000000) &&
			     (*acknowledge == ((unsigned long)-1)); i++ )
				mb();

			if (*acknowledge == reg)
				prom_printf("done\n");
			else
				prom_printf("failed: %x\n", *acknowledge);
		}
#ifdef CONFIG_SMP
		else
			prom_printf("%x : boot cpu %x\n", cpuid, reg);
#endif /* CONFIG_SMP */

		/* Reserve cpu #s for secondary threads. They start later. */
		cpuid += cpu_threads;
	}
#ifdef CONFIG_HMT
	/* Only enable HMT on processors that provide support. */
	if (__is_processor(PV_PULSAR) ||
	    __is_processor(PV_ICESTAR) ||
	    __is_processor(PV_SSTAR)) {
		prom_printf(" starting secondary threads\n");

		for (i = 0; i < NR_CPUS; i += 2) {
			if (!cpu_online(i))
				continue;

			if (i == 0) {
				unsigned long pir = mfspr(SPRN_PIR);
				if (__is_processor(PV_PULSAR)) {
					RELOC(hmt_thread_data)[i].pir =
						pir & 0x1f;
				} else {
					RELOC(hmt_thread_data)[i].pir =
						pir & 0x3ff;
				}
			}
		}
	} else {
		prom_printf("Processor is not HMT capable\n");
	}
#endif

	if (cpuid > NR_CPUS)
		prom_printf("WARNING: maximum CPUs (" __stringify(NR_CPUS)
			    ") exceeded: ignoring extras\n");

	prom_debug("prom_hold_cpus: end...\n");
}
1319
1320
1321static void __init prom_init_client_services(unsigned long pp)
1322{
1323 struct prom_t *_prom = &RELOC(prom);
1324
1325 /* Get a handle to the prom entry point before anything else */
1326 RELOC(prom_entry) = pp;
1327
1328 /* get a handle for the stdout device */
1329 _prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1330 if (!PHANDLE_VALID(_prom->chosen))
1331 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1332
1333 /* get device tree root */
1334 _prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
1335 if (!PHANDLE_VALID(_prom->root))
1336 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1337
1338 _prom->mmumap = 0;
1339}
1340
1341#ifdef CONFIG_PPC32
1342/*
1343 * For really old powermacs, we need to map things we claim.
1344 * For that, we need the ihandle of the mmu.
1345 */
1346static void __init prom_find_mmu(void)
1347{
1348 struct prom_t *_prom = &RELOC(prom);
1349 phandle oprom;
1350 char version[64];
1351
1352 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1353 if (!PHANDLE_VALID(oprom))
1354 return;
1355 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1356 return;
1357 version[sizeof(version) - 1] = 0;
1358 prom_printf("OF version is '%s'\n", version);
1359 /* XXX might need to add other versions here */
1360 if (strcmp(version, "Open Firmware, 1.0.5") != 0)
1361 return;
1362 prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
1363 sizeof(_prom->mmumap));
1364}
1365#else
1366#define prom_find_mmu()
1367#endif
1368
/*
 * Resolve the firmware's stdout instance, publish its package and full
 * path under /chosen for the kernel, and mark it as the boot display if
 * it is one.
 */
static void __init prom_init_stdout(void)
{
	struct prom_t *_prom = &RELOC(prom);
	char *path = RELOC(of_stdout_device);
	char type[16];
	u32 val;

	if (prom_getprop(_prom->chosen, "stdout", &val, sizeof(val)) <= 0)
		prom_panic("cannot find stdout");

	_prom->stdout = val;

	/* Get the full OF pathname of the stdout device */
	/* NOTE(review): assumes of_stdout_device is at least 256 bytes —
	 * its declaration is not visible here, confirm against it. */
	memset(path, 0, 256);
	call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
	val = call_prom("instance-to-package", 1, 1, _prom->stdout);
	prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
	prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
	prom_setprop(_prom->chosen, "linux,stdout-path",
		     RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);

	/* If it's a display, note it */
	memset(type, 0, sizeof(type));
	prom_getprop(val, "device_type", type, sizeof(type));
	if (strcmp(type, RELOC("display")) == 0)
		prom_setprop(val, "linux,boot-display", NULL, 0);
}
1396
1397static void __init prom_close_stdin(void)
1398{
1399 struct prom_t *_prom = &RELOC(prom);
1400 ihandle val;
1401
1402 if (prom_getprop(_prom->chosen, "stdin", &val, sizeof(val)) > 0)
1403 call_prom("close", 1, 0, val);
1404}
1405
/*
 * Determine what platform we are booting on by inspecting the root
 * node's "compatible" property (a list of NUL-separated strings),
 * falling back to pSeries/CHRP defaults. On PPC64 the presence of
 * ibm,hypertas-functions in /rtas indicates an LPAR.
 */
static int __init prom_find_machine_type(void)
{
	struct prom_t *_prom = &RELOC(prom);
	char compat[256];
	int len, i = 0;
	phandle rtas;

	len = prom_getprop(_prom->root, "compatible",
			   compat, sizeof(compat)-1);
	if (len > 0) {
		compat[len] = 0;
		/* Walk each NUL-separated entry of the compatible list */
		while (i < len) {
			char *p = &compat[i];
			int sl = strlen(p);
			if (sl == 0)
				break;
			if (strstr(p, RELOC("Power Macintosh")) ||
			    strstr(p, RELOC("MacRISC")))
				return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
			if (strstr(p, RELOC("Momentum,Maple")))
				return PLATFORM_MAPLE;
#endif
			i += sl + 1;
		}
	}
#ifdef CONFIG_PPC64
	/* Default to pSeries. We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (PHANDLE_VALID(rtas)) {
		int x = prom_getproplen(rtas, "ibm,hypertas-functions");
		if (x != PROM_ERROR) {
			prom_printf("Hypertas detected, assuming LPAR !\n");
			return PLATFORM_PSERIES_LPAR;
		}
	}
	return PLATFORM_PSERIES;
#else
	return PLATFORM_CHRP;
#endif
}
1447
1448static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1449{
1450 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1451}
1452
1453/*
1454 * If we have a display that we don't know how to drive,
1455 * we will want to try to execute OF's open method for it
1456 * later. However, OF will probably fall over if we do that
1457 * we've taken over the MMU.
1458 * So we check whether we will need to open the display,
1459 * and if so, open it now.
1460 */
1461static void __init prom_check_displays(void)
1462{
1463 char type[16], *path;
1464 phandle node;
1465 ihandle ih;
1466 int i;
1467
1468 static unsigned char default_colors[] = {
1469 0x00, 0x00, 0x00,
1470 0x00, 0x00, 0xaa,
1471 0x00, 0xaa, 0x00,
1472 0x00, 0xaa, 0xaa,
1473 0xaa, 0x00, 0x00,
1474 0xaa, 0x00, 0xaa,
1475 0xaa, 0xaa, 0x00,
1476 0xaa, 0xaa, 0xaa,
1477 0x55, 0x55, 0x55,
1478 0x55, 0x55, 0xff,
1479 0x55, 0xff, 0x55,
1480 0x55, 0xff, 0xff,
1481 0xff, 0x55, 0x55,
1482 0xff, 0x55, 0xff,
1483 0xff, 0xff, 0x55,
1484 0xff, 0xff, 0xff
1485 };
1486 const unsigned char *clut;
1487
1488 prom_printf("Looking for displays\n");
1489 for (node = 0; prom_next_node(&node); ) {
1490 memset(type, 0, sizeof(type));
1491 prom_getprop(node, "device_type", type, sizeof(type));
1492 if (strcmp(type, RELOC("display")) != 0)
1493 continue;
1494
1495 /* It seems OF doesn't null-terminate the path :-( */
1496 path = RELOC(prom_scratch);
1497 memset(path, 0, PROM_SCRATCH_SIZE);
1498
1499 /*
1500 * leave some room at the end of the path for appending extra
1501 * arguments
1502 */
1503 if (call_prom("package-to-path", 3, 1, node, path,
1504 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
1505 continue;
1506 prom_printf("found display : %s, opening ... ", path);
1507
1508 ih = call_prom("open", 1, 1, path);
1509 if (ih == 0) {
1510 prom_printf("failed\n");
1511 continue;
1512 }
1513
1514 /* Success */
1515 prom_printf("done\n");
1516 prom_setprop(node, "linux,opened", NULL, 0);
1517
1518 /* Setup a usable color table when the appropriate
1519 * method is available. Should update this to set-colors */
1520 clut = RELOC(default_colors);
1521 for (i = 0; i < 32; i++, clut += 3)
1522 if (prom_set_color(ih, i, clut[0], clut[1],
1523 clut[2]) != 0)
1524 break;
1525
1526#ifdef CONFIG_LOGO_LINUX_CLUT224
1527 clut = PTRRELOC(RELOC(logo_linux_clut224.clut));
1528 for (i = 0; i < RELOC(logo_linux_clut224.clutsize); i++, clut += 3)
1529 if (prom_set_color(ih, i + 32, clut[0], clut[1],
1530 clut[2]) != 0)
1531 break;
1532#endif /* CONFIG_LOGO_LINUX_CLUT224 */
1533 }
1534}
1535
1536
1537/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
1538static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
1539 unsigned long needed, unsigned long align)
1540{
1541 void *ret;
1542
1543 *mem_start = _ALIGN(*mem_start, align);
1544 while ((*mem_start + needed) > *mem_end) {
1545 unsigned long room, chunk;
1546
1547 prom_debug("Chunk exhausted, claiming more at %x...\n",
1548 RELOC(alloc_bottom));
1549 room = RELOC(alloc_top) - RELOC(alloc_bottom);
1550 if (room > DEVTREE_CHUNK_SIZE)
1551 room = DEVTREE_CHUNK_SIZE;
1552 if (room < PAGE_SIZE)
1553 prom_panic("No memory for flatten_device_tree (no room)");
1554 chunk = alloc_up(room, 0);
1555 if (chunk == 0)
1556 prom_panic("No memory for flatten_device_tree (claim failed)");
1557 *mem_end = RELOC(alloc_top);
1558 }
1559
1560 ret = (void *)*mem_start;
1561 *mem_start += needed;
1562
1563 return ret;
1564}
1565
/* Append one 32-bit token (4-byte aligned) to the flattened device tree */
#define dt_push_token(token, mem_start, mem_end) \
	do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
1568
1569static unsigned long __init dt_find_string(char *str)
1570{
1571 char *s, *os;
1572
1573 s = os = (char *)RELOC(dt_string_start);
1574 s += 4;
1575 while (s < (char *)RELOC(dt_string_end)) {
1576 if (strcmp(s, str) == 0)
1577 return s - os;
1578 s += strlen(s) + 1;
1579 }
1580 return 0;
1581}
1582
1583/*
1584 * The Open Firmware 1275 specification states properties must be 31 bytes or
1585 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
1586 */
1587#define MAX_PROPERTY_NAME 64
1588
/*
 * Recursively collect every property name in the subtree rooted at
 * `node` into the de-duplicated string table between dt_string_start
 * and dt_string_end, advancing *mem_start as names are kept. Names are
 * first written into freshly made room, then the allocation is unwound
 * (by resetting *mem_start) whenever the name is a duplicate, is
 * "name", or enumeration ends.
 */
static void __init scan_dt_build_strings(phandle node,
					 unsigned long *mem_start,
					 unsigned long *mem_end)
{
	char *prev_name, *namep, *sstart;
	unsigned long soff;
	phandle child;

	sstart = (char *)RELOC(dt_string_start);

	/* get and store all property names */
	prev_name = RELOC("");
	for (;;) {
		/* 64 is max len of name including nul. */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;
			break;
		}

		/* skip "name" */
		if (strcmp(namep, RELOC("name")) == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = RELOC("name");
			continue;
		}
		/* get/create string entry */
		soff = dt_find_string(namep);
		if (soff != 0) {
			/* Already in the table: drop the copy we made and
			 * continue iterating from the existing entry. */
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
		} else {
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + strlen(namep) + 1;
			RELOC(dt_string_end) = *mem_start;
		}
		prev_name = namep;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}
}
1636
/*
 * Recursively emit the flattened-device-tree structure block for the
 * subtree rooted at `node`: BEGIN_NODE + unit name, one PROP record per
 * property (length, string-table offset, value), a synthesized
 * "linux,phandle" property, then all children and END_NODE.
 */
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	static char pname[MAX_PROPERTY_NAME];
	int l, room;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	room = *mem_end - *mem_start;
	if (room > 255)
		room = 255;
	l = call_prom("package-to-path", 3, 1, node, namep, room);
	if (l >= 0) {
		/* Didn't fit? Get more room. */
		if (l >= room) {
			if (l >= *mem_end - *mem_start)
				namep = make_room(mem_start, mem_end, l+1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties, and extract
		 * the unit name (everything after the last '/').
		 */
		for (lp = p = namep, ep = namep + l; p < ep; p++) {
			if (*p == '/')
				lp = namep;
			else if (*p != 0)
				*lp++ = *p;
		}
		*lp = 0;
		/* Keep only the compacted unit name; realign for tokens */
		*mem_start = _ALIGN((unsigned long)lp + 1, 4);
	}

	/* get it again for debugging */
	path = RELOC(prom_scratch);
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);

	/* get and store all properties */
	prev_name = RELOC("");
	sstart = (char *)RELOC(dt_string_start);
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name,
			      RELOC(pname)) != 1)
			break;

		/* skip "name" */
		if (strcmp(RELOC(pname), RELOC("name")) == 0) {
			prev_name = RELOC("name");
			continue;
		}

		/* find string offset */
		soff = dt_find_string(RELOC(pname));
		if (soff == 0) {
			/* Should have been collected by the strings pass */
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", RELOC(pname), path);
			break;
		}
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, RELOC(pname));

		/* sanity checks */
		if (l == PROM_ERROR)
			continue;
		if (l > MAX_PROPERTY_LENGTH) {
			prom_printf("WARNING: ignoring large property ");
			/* It seems OF doesn't null-terminate the path :-( */
			prom_printf("[%s] ", path);
			prom_printf("%s length 0x%x\n", RELOC(pname), l);
			continue;
		}

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
		*mem_start = _ALIGN(*mem_start, 4);
	}

	/* Add a "linux,phandle" property. */
	soff = dt_find_string(RELOC("linux,phandle"));
	if (soff == 0)
		prom_printf("WARNING: Can't find string index for"
			    " <linux-phandle> node %s\n", path);
	else {
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(4, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);
		valp = make_room(mem_start, mem_end, 4, 4);
		*(u32 *)valp = node;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
1753
/*
 * Build the flattened device tree blob that the kernel proper consumes:
 * header, memory reserve map, string table, then the structure block,
 * reserving the whole result when done.
 */
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	struct prom_t *_prom = &RELOC(prom);
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 4Mb, as this is our "chunk" size
	 */
	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = RELOC(alloc_top);

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic ("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end,
			sizeof(struct boot_param_header), 4);
	RELOC(dt_header_start) = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_string_start) = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, RELOC("linux,phandle"));
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	RELOC(dt_string_end) = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_struct_start) = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = _prom->cpu;
	hdr->magic = OF_DT_HEADER;
	hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
	hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
	hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
	hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
	hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
	hdr->version = OF_DT_VERSION;
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = 0x10;

	/* Reserve the whole thing and copy the reserve map in, we
	 * also bump mem_reserve_cnt to cause further reservations to
	 * fail since it's too late.
	 */
	reserve_mem(RELOC(dt_header_start), hdr->totalsize);
	memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < RELOC(mem_reserve_cnt); i++)
			prom_printf(" %x - %x\n",
				    RELOC(mem_reserve_map)[i].base,
				    RELOC(mem_reserve_map)[i].size);
	}
#endif
	RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    RELOC(dt_string_start), RELOC(dt_string_end));
	prom_printf("Device tree struct 0x%x -> 0x%x\n",
		    RELOC(dt_struct_start), RELOC(dt_struct_end));

}
1850
1851
/*
 * Work around a firmware bug on some PowerMac G5s: the U3 i2c node
 * lacks its "interrupts" property. For the affected U3 revisions
 * (0x35, 0x37) synthesize the interrupt spec and point the node's
 * interrupt-parent at the mpic.
 */
static void __init fixup_device_tree(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev != 0x35 && u3_rev != 0x37)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
#endif
}
1891
1892
1893static void __init prom_find_boot_cpu(void)
1894{
1895 struct prom_t *_prom = &RELOC(prom);
1896 u32 getprop_rval;
1897 ihandle prom_cpu;
1898 phandle cpu_pkg;
1899
1900 _prom->cpu = 0;
1901 if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
1902 return;
1903
1904 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
1905
1906 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
1907 _prom->cpu = getprop_rval;
1908
1909 prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
1910}
1911
1912static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
1913{
1914#ifdef CONFIG_BLK_DEV_INITRD
1915 struct prom_t *_prom = &RELOC(prom);
1916
1917 if (r3 && r4 && r4 != 0xdeadbeef) {
1918 unsigned long val;
1919
1920 RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
1921 RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
1922
1923 val = RELOC(prom_initrd_start);
1924 prom_setprop(_prom->chosen, "linux,initrd-start", &val,
1925 sizeof(val));
1926 val = RELOC(prom_initrd_end);
1927 prom_setprop(_prom->chosen, "linux,initrd-end", &val,
1928 sizeof(val));
1929
1930 reserve_mem(RELOC(prom_initrd_start),
1931 RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
1932
1933 prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
1934 prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
1935 }
1936#endif /* CONFIG_BLK_DEV_INITRD */
1937}
1938
1939/*
1940 * We enter here early on, when the Open Firmware prom is still
1941 * handling exceptions and the MMU hash table for us.
1942 */
1943
1944unsigned long __init prom_init(unsigned long r3, unsigned long r4,
1945 unsigned long pp,
1946 unsigned long r6, unsigned long r7)
1947{
1948 struct prom_t *_prom;
1949 unsigned long hdr;
1950 u32 getprop_rval;
1951 unsigned long offset = reloc_offset();
1952
1953#ifdef CONFIG_PPC32
1954 reloc_got2(offset);
1955#endif
1956
1957 _prom = &RELOC(prom);
1958
1959 /*
1960 * First zero the BSS
1961 */
1962 memset(&RELOC(__bss_start), 0, __bss_stop - __bss_start);
1963
1964 /*
1965 * Init interface to Open Firmware, get some node references,
1966 * like /chosen
1967 */
1968 prom_init_client_services(pp);
1969
1970 /*
1971 * Init prom stdout device
1972 */
1973 prom_init_stdout();
1974
1975 /*
1976 * See if this OF is old enough that we need to do explicit maps
1977 */
1978 prom_find_mmu();
1979
1980 /*
1981 * Check for an initrd
1982 */
1983 prom_check_initrd(r3, r4);
1984
1985 /*
1986 * Get default machine type. At this point, we do not differentiate
1987 * between pSeries SMP and pSeries LPAR
1988 */
1989 RELOC(of_platform) = prom_find_machine_type();
1990 getprop_rval = RELOC(of_platform);
1991 prom_setprop(_prom->chosen, "linux,platform",
1992 &getprop_rval, sizeof(getprop_rval));
1993
1994#ifdef CONFIG_PPC_PSERIES
1995 /*
1996 * On pSeries, inform the firmware about our capabilities
1997 */
1998 if (RELOC(of_platform) & PLATFORM_PSERIES)
1999 prom_send_capabilities();
2000#endif
2001
2002 /*
2003 * On pSeries and BPA, copy the CPU hold code
2004 */
2005 if (RELOC(of_platform) != PLATFORM_POWERMAC)
2006 copy_and_flush(0, KERNELBASE + offset, 0x100, 0);
2007
2008 /*
2009 * Do early parsing of command line
2010 */
2011 early_cmdline_parse();
2012
2013 /*
2014 * Initialize memory management within prom_init
2015 */
2016 prom_init_mem();
2017
2018 /*
2019 * Determine which cpu is actually running right _now_
2020 */
2021 prom_find_boot_cpu();
2022
2023 /*
2024 * Initialize display devices
2025 */
2026 prom_check_displays();
2027
2028#ifdef CONFIG_PPC64
2029 /*
2030 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2031 * that uses the allocator, we need to make sure we get the top of memory
2032 * available for us here...
2033 */
2034 if (RELOC(of_platform) == PLATFORM_PSERIES)
2035 prom_initialize_tce_table();
2036#endif
2037
2038 /*
2039 * On non-powermacs, try to instantiate RTAS and puts all CPUs
2040 * in spin-loops. PowerMacs don't have a working RTAS and use
2041 * a different way to spin CPUs
2042 */
2043 if (RELOC(of_platform) != PLATFORM_POWERMAC) {
2044 prom_instantiate_rtas();
2045 prom_hold_cpus();
2046 }
2047
2048 /*
2049 * Fill in some infos for use by the kernel later on
2050 */
2051 if (RELOC(prom_memory_limit))
2052 prom_setprop(_prom->chosen, "linux,memory-limit",
2053 &RELOC(prom_memory_limit),
2054 sizeof(prom_memory_limit));
2055#ifdef CONFIG_PPC64
2056 if (RELOC(ppc64_iommu_off))
2057 prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
2058
2059 if (RELOC(iommu_force_on))
2060 prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
2061
2062 if (RELOC(prom_tce_alloc_start)) {
2063 prom_setprop(_prom->chosen, "linux,tce-alloc-start",
2064 &RELOC(prom_tce_alloc_start),
2065 sizeof(prom_tce_alloc_start));
2066 prom_setprop(_prom->chosen, "linux,tce-alloc-end",
2067 &RELOC(prom_tce_alloc_end),
2068 sizeof(prom_tce_alloc_end));
2069 }
2070#endif
2071
2072 /*
2073 * Fixup any known bugs in the device-tree
2074 */
2075 fixup_device_tree();
2076
2077 /*
2078 * Now finally create the flattened device-tree
2079 */
2080 prom_printf("copying OF device tree ...\n");
2081 flatten_device_tree();
2082
2083 /* in case stdin is USB and still active on IBM machines... */
2084 prom_close_stdin();
2085
2086 /*
2087 * Call OF "quiesce" method to shut down pending DMA's from
2088 * devices etc...
2089 */
2090 prom_printf("Calling quiesce ...\n");
2091 call_prom("quiesce", 0, 0);
2092
2093 /*
2094 * And finally, call the kernel passing it the flattened device
2095 * tree and NULL as r5, thus triggering the new entry point which
2096 * is common to us and kexec
2097 */
2098 hdr = RELOC(dt_header_start);
2099 prom_printf("returning from prom_init\n");
2100 prom_debug("->dt_header_start=0x%x\n", hdr);
2101
2102#ifdef CONFIG_PPC32
2103 reloc_got2(-offset);
2104#endif
2105
2106 __start(hdr, KERNELBASE + offset, 0);
2107
2108 return 0;
2109}
diff --git a/arch/ppc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e7aee4108dea..568ea335d616 100644
--- a/arch/ppc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc/kernel/ptrace.c
3 *
4 * PowerPC version 2 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 4 *
@@ -10,13 +8,14 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 9 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 11 * and Paul Mackerras (paulus@samba.org).
14 * 12 *
15 * This file is subject to the terms and conditions of the GNU General 13 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 14 * Public License. See the file README.legal in the main directory of
17 * this archive for more details. 15 * this archive for more details.
18 */ 16 */
19 17
18#include <linux/config.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/sched.h> 20#include <linux/sched.h>
22#include <linux/mm.h> 21#include <linux/mm.h>
@@ -29,13 +28,19 @@
29#include <linux/signal.h> 28#include <linux/signal.h>
30#include <linux/seccomp.h> 29#include <linux/seccomp.h>
31#include <linux/audit.h> 30#include <linux/audit.h>
31#ifdef CONFIG_PPC32
32#include <linux/module.h> 32#include <linux/module.h>
33#endif
33 34
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/page.h> 36#include <asm/page.h>
36#include <asm/pgtable.h> 37#include <asm/pgtable.h>
37#include <asm/system.h> 38#include <asm/system.h>
39#ifdef CONFIG_PPC64
40#include <asm/ptrace-common.h>
41#endif
38 42
43#ifdef CONFIG_PPC32
39/* 44/*
40 * Set of msr bits that gdb can change on behalf of a process. 45 * Set of msr bits that gdb can change on behalf of a process.
41 */ 46 */
@@ -44,12 +49,14 @@
44#else 49#else
45#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) 50#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
46#endif 51#endif
52#endif /* CONFIG_PPC32 */
47 53
48/* 54/*
49 * does not yet catch signals sent when the child dies. 55 * does not yet catch signals sent when the child dies.
50 * in exit.c or in signal.c. 56 * in exit.c or in signal.c.
51 */ 57 */
52 58
59#ifdef CONFIG_PPC32
53/* 60/*
54 * Get contents of register REGNO in task TASK. 61 * Get contents of register REGNO in task TASK.
55 */ 62 */
@@ -228,6 +235,7 @@ clear_single_step(struct task_struct *task)
228#endif 235#endif
229 } 236 }
230} 237}
238#endif /* CONFIG_PPC32 */
231 239
232/* 240/*
233 * Called by kernel/ptrace.c when detaching.. 241 * Called by kernel/ptrace.c when detaching..
@@ -240,7 +248,7 @@ void ptrace_disable(struct task_struct *child)
240 clear_single_step(child); 248 clear_single_step(child);
241} 249}
242 250
243int sys_ptrace(long request, long pid, long addr, long data) 251long sys_ptrace(long request, long pid, long addr, long data)
244{ 252{
245 struct task_struct *child; 253 struct task_struct *child;
246 int ret = -EPERM; 254 int ret = -EPERM;
@@ -296,25 +304,28 @@ int sys_ptrace(long request, long pid, long addr, long data)
296 } 304 }
297 305
298 /* read the word at location addr in the USER area. */ 306 /* read the word at location addr in the USER area. */
299 /* XXX this will need fixing for 64-bit */
300 case PTRACE_PEEKUSR: { 307 case PTRACE_PEEKUSR: {
301 unsigned long index, tmp; 308 unsigned long index, tmp;
302 309
303 ret = -EIO; 310 ret = -EIO;
304 /* convert to index and check */ 311 /* convert to index and check */
312#ifdef CONFIG_PPC32
305 index = (unsigned long) addr >> 2; 313 index = (unsigned long) addr >> 2;
306 if ((addr & 3) || index > PT_FPSCR 314 if ((addr & 3) || (index > PT_FPSCR)
307 || child->thread.regs == NULL) 315 || (child->thread.regs == NULL))
316#else
317 index = (unsigned long) addr >> 3;
318 if ((addr & 7) || (index > PT_FPSCR))
319#endif
308 break; 320 break;
309 321
322#ifdef CONFIG_PPC32
310 CHECK_FULL_REGS(child->thread.regs); 323 CHECK_FULL_REGS(child->thread.regs);
324#endif
311 if (index < PT_FPR0) { 325 if (index < PT_FPR0) {
312 tmp = get_reg(child, (int) index); 326 tmp = get_reg(child, (int) index);
313 } else { 327 } else {
314 preempt_disable(); 328 flush_fp_to_thread(child);
315 if (child->thread.regs->msr & MSR_FP)
316 giveup_fpu(child);
317 preempt_enable();
318 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; 329 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
319 } 330 }
320 ret = put_user(tmp,(unsigned long __user *) data); 331 ret = put_user(tmp,(unsigned long __user *) data);
@@ -325,7 +336,8 @@ int sys_ptrace(long request, long pid, long addr, long data)
325 case PTRACE_POKETEXT: /* write the word at location addr. */ 336 case PTRACE_POKETEXT: /* write the word at location addr. */
326 case PTRACE_POKEDATA: 337 case PTRACE_POKEDATA:
327 ret = 0; 338 ret = 0;
328 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) 339 if (access_process_vm(child, addr, &data, sizeof(data), 1)
340 == sizeof(data))
329 break; 341 break;
330 ret = -EIO; 342 ret = -EIO;
331 break; 343 break;
@@ -336,21 +348,25 @@ int sys_ptrace(long request, long pid, long addr, long data)
336 348
337 ret = -EIO; 349 ret = -EIO;
338 /* convert to index and check */ 350 /* convert to index and check */
351#ifdef CONFIG_PPC32
339 index = (unsigned long) addr >> 2; 352 index = (unsigned long) addr >> 2;
340 if ((addr & 3) || index > PT_FPSCR 353 if ((addr & 3) || (index > PT_FPSCR)
341 || child->thread.regs == NULL) 354 || (child->thread.regs == NULL))
355#else
356 index = (unsigned long) addr >> 3;
357 if ((addr & 7) || (index > PT_FPSCR))
358#endif
342 break; 359 break;
343 360
361#ifdef CONFIG_PPC32
344 CHECK_FULL_REGS(child->thread.regs); 362 CHECK_FULL_REGS(child->thread.regs);
363#endif
345 if (index == PT_ORIG_R3) 364 if (index == PT_ORIG_R3)
346 break; 365 break;
347 if (index < PT_FPR0) { 366 if (index < PT_FPR0) {
348 ret = put_reg(child, index, data); 367 ret = put_reg(child, index, data);
349 } else { 368 } else {
350 preempt_disable(); 369 flush_fp_to_thread(child);
351 if (child->thread.regs->msr & MSR_FP)
352 giveup_fpu(child);
353 preempt_enable();
354 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; 370 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
355 ret = 0; 371 ret = 0;
356 } 372 }
@@ -362,11 +378,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
362 ret = -EIO; 378 ret = -EIO;
363 if (!valid_signal(data)) 379 if (!valid_signal(data))
364 break; 380 break;
365 if (request == PTRACE_SYSCALL) { 381 if (request == PTRACE_SYSCALL)
366 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 382 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
367 } else { 383 else
368 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 384 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
369 }
370 child->exit_code = data; 385 child->exit_code = data;
371 /* make sure the single step bit is not set. */ 386 /* make sure the single step bit is not set. */
372 clear_single_step(child); 387 clear_single_step(child);
@@ -404,28 +419,102 @@ int sys_ptrace(long request, long pid, long addr, long data)
404 break; 419 break;
405 } 420 }
406 421
422#ifdef CONFIG_PPC64
423 case PTRACE_GET_DEBUGREG: {
424 ret = -EINVAL;
425 /* We only support one DABR and no IABRS at the moment */
426 if (addr > 0)
427 break;
428 ret = put_user(child->thread.dabr,
429 (unsigned long __user *)data);
430 break;
431 }
432
433 case PTRACE_SET_DEBUGREG:
434 ret = ptrace_set_debugreg(child, addr, data);
435 break;
436#endif
437
407 case PTRACE_DETACH: 438 case PTRACE_DETACH:
408 ret = ptrace_detach(child, data); 439 ret = ptrace_detach(child, data);
409 break; 440 break;
410 441
442#ifdef CONFIG_PPC64
443 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
444 int i;
445 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
446 unsigned long __user *tmp = (unsigned long __user *)addr;
447
448 for (i = 0; i < 32; i++) {
449 ret = put_user(*reg, tmp);
450 if (ret)
451 break;
452 reg++;
453 tmp++;
454 }
455 break;
456 }
457
458 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
459 int i;
460 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
461 unsigned long __user *tmp = (unsigned long __user *)addr;
462
463 for (i = 0; i < 32; i++) {
464 ret = get_user(*reg, tmp);
465 if (ret)
466 break;
467 reg++;
468 tmp++;
469 }
470 break;
471 }
472
473 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
474 int i;
475 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
476 unsigned long __user *tmp = (unsigned long __user *)addr;
477
478 flush_fp_to_thread(child);
479
480 for (i = 0; i < 32; i++) {
481 ret = put_user(*reg, tmp);
482 if (ret)
483 break;
484 reg++;
485 tmp++;
486 }
487 break;
488 }
489
490 case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
491 int i;
492 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
493 unsigned long __user *tmp = (unsigned long __user *)addr;
494
495 flush_fp_to_thread(child);
496
497 for (i = 0; i < 32; i++) {
498 ret = get_user(*reg, tmp);
499 if (ret)
500 break;
501 reg++;
502 tmp++;
503 }
504 break;
505 }
506#endif /* CONFIG_PPC64 */
507
411#ifdef CONFIG_ALTIVEC 508#ifdef CONFIG_ALTIVEC
412 case PTRACE_GETVRREGS: 509 case PTRACE_GETVRREGS:
413 /* Get the child altivec register state. */ 510 /* Get the child altivec register state. */
414 preempt_disable(); 511 flush_altivec_to_thread(child);
415 if (child->thread.regs->msr & MSR_VEC)
416 giveup_altivec(child);
417 preempt_enable();
418 ret = get_vrregs((unsigned long __user *)data, child); 512 ret = get_vrregs((unsigned long __user *)data, child);
419 break; 513 break;
420 514
421 case PTRACE_SETVRREGS: 515 case PTRACE_SETVRREGS:
422 /* Set the child altivec register state. */ 516 /* Set the child altivec register state. */
423 /* this is to clear the MSR_VEC bit to force a reload 517 flush_altivec_to_thread(child);
424 * of register state from memory */
425 preempt_disable();
426 if (child->thread.regs->msr & MSR_VEC)
427 giveup_altivec(child);
428 preempt_enable();
429 ret = set_vrregs(child, (unsigned long __user *)data); 518 ret = set_vrregs(child, (unsigned long __user *)data);
430 break; 519 break;
431#endif 520#endif
@@ -478,12 +567,21 @@ static void do_syscall_trace(void)
478 567
479void do_syscall_trace_enter(struct pt_regs *regs) 568void do_syscall_trace_enter(struct pt_regs *regs)
480{ 569{
570#ifdef CONFIG_PPC64
571 secure_computing(regs->gpr[0]);
572#endif
573
481 if (test_thread_flag(TIF_SYSCALL_TRACE) 574 if (test_thread_flag(TIF_SYSCALL_TRACE)
482 && (current->ptrace & PT_PTRACED)) 575 && (current->ptrace & PT_PTRACED))
483 do_syscall_trace(); 576 do_syscall_trace();
484 577
485 if (unlikely(current->audit_context)) 578 if (unlikely(current->audit_context))
486 audit_syscall_entry(current, AUDIT_ARCH_PPC, 579 audit_syscall_entry(current,
580#ifdef CONFIG_PPC32
581 AUDIT_ARCH_PPC,
582#else
583 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
584#endif
487 regs->gpr[0], 585 regs->gpr[0],
488 regs->gpr[3], regs->gpr[4], 586 regs->gpr[3], regs->gpr[4],
489 regs->gpr[5], regs->gpr[6]); 587 regs->gpr[5], regs->gpr[6]);
@@ -491,17 +589,25 @@ void do_syscall_trace_enter(struct pt_regs *regs)
491 589
492void do_syscall_trace_leave(struct pt_regs *regs) 590void do_syscall_trace_leave(struct pt_regs *regs)
493{ 591{
592#ifdef CONFIG_PPC32
494 secure_computing(regs->gpr[0]); 593 secure_computing(regs->gpr[0]);
594#endif
495 595
496 if (unlikely(current->audit_context)) 596 if (unlikely(current->audit_context))
497 audit_syscall_exit(current, 597 audit_syscall_exit(current,
498 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, 598 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
499 regs->result); 599 regs->result);
500 600
501 if ((test_thread_flag(TIF_SYSCALL_TRACE)) 601 if ((test_thread_flag(TIF_SYSCALL_TRACE)
602#ifdef CONFIG_PPC64
603 || test_thread_flag(TIF_SINGLESTEP)
604#endif
605 )
502 && (current->ptrace & PT_PTRACED)) 606 && (current->ptrace & PT_PTRACED))
503 do_syscall_trace(); 607 do_syscall_trace();
504} 608}
505 609
610#ifdef CONFIG_PPC32
506EXPORT_SYMBOL(do_syscall_trace_enter); 611EXPORT_SYMBOL(do_syscall_trace_enter);
507EXPORT_SYMBOL(do_syscall_trace_leave); 612EXPORT_SYMBOL(do_syscall_trace_leave);
613#endif
diff --git a/arch/ppc64/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index fb8c22d6084a..91eb952e0293 100644
--- a/arch/ppc64/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/ptrace32.c 2 * ptrace for 32-bit processes running on a 64-bit kernel.
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
@@ -10,10 +10,10 @@
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 * 11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com) 12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au). 13 * and Paul Mackerras (paulus@samba.org).
14 * 14 *
15 * This file is subject to the terms and conditions of the GNU General 15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of 16 * Public License. See the file COPYING in the main directory of
17 * this archive for more details. 17 * this archive for more details.
18 */ 18 */
19 19
@@ -40,7 +40,8 @@
40 * in exit.c or in signal.c. 40 * in exit.c or in signal.c.
41 */ 41 */
42 42
43int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data) 43long compat_sys_ptrace(int request, int pid, unsigned long addr,
44 unsigned long data)
44{ 45{
45 struct task_struct *child; 46 struct task_struct *child;
46 int ret = -EPERM; 47 int ret = -EPERM;
diff --git a/arch/ppc64/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5e8eb33b8e54..4d22eeeeb91d 100644
--- a/arch/ppc64/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,28 +25,29 @@
25#include <asm/page.h> 25#include <asm/page.h>
26#include <asm/param.h> 26#include <asm/param.h>
27#include <asm/system.h> 27#include <asm/system.h>
28#include <asm/abs_addr.h>
29#include <asm/udbg.h>
30#include <asm/delay.h> 28#include <asm/delay.h>
31#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/lmb.h>
31#ifdef CONFIG_PPC64
32#include <asm/systemcfg.h> 32#include <asm/systemcfg.h>
33#endif
33 34
34struct flash_block_list_header rtas_firmware_flash_list = {0, NULL}; 35struct rtas_t rtas = {
35
36struct rtas_t rtas = {
37 .lock = SPIN_LOCK_UNLOCKED 36 .lock = SPIN_LOCK_UNLOCKED
38}; 37};
39 38
40EXPORT_SYMBOL(rtas); 39EXPORT_SYMBOL(rtas);
41 40
42char rtas_err_buf[RTAS_ERROR_LOG_MAX];
43
44DEFINE_SPINLOCK(rtas_data_buf_lock); 41DEFINE_SPINLOCK(rtas_data_buf_lock);
45char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned; 42char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
46unsigned long rtas_rmo_buf; 43unsigned long rtas_rmo_buf;
47 44
48void 45/*
49call_rtas_display_status(unsigned char c) 46 * call_rtas_display_status and call_rtas_display_status_delay
47 * are designed only for very early low-level debugging, which
48 * is why the token is hard-coded to 10.
49 */
50void call_rtas_display_status(unsigned char c)
50{ 51{
51 struct rtas_args *args = &rtas.args; 52 struct rtas_args *args = &rtas.args;
52 unsigned long s; 53 unsigned long s;
@@ -66,8 +67,7 @@ call_rtas_display_status(unsigned char c)
66 spin_unlock_irqrestore(&rtas.lock, s); 67 spin_unlock_irqrestore(&rtas.lock, s);
67} 68}
68 69
69void 70void call_rtas_display_status_delay(unsigned char c)
70call_rtas_display_status_delay(unsigned char c)
71{ 71{
72 static int pending_newline = 0; /* did last write end with unprinted newline? */ 72 static int pending_newline = 0; /* did last write end with unprinted newline? */
73 static int width = 16; 73 static int width = 16;
@@ -91,8 +91,7 @@ call_rtas_display_status_delay(unsigned char c)
91 } 91 }
92} 92}
93 93
94void 94void rtas_progress(char *s, unsigned short hex)
95rtas_progress(char *s, unsigned short hex)
96{ 95{
97 struct device_node *root; 96 struct device_node *root;
98 int width, *p; 97 int width, *p;
@@ -208,18 +207,16 @@ rtas_progress(char *s, unsigned short hex)
208 spin_unlock(&progress_lock); 207 spin_unlock(&progress_lock);
209} 208}
210 209
211int 210int rtas_token(const char *service)
212rtas_token(const char *service)
213{ 211{
214 int *tokp; 212 int *tokp;
215 if (rtas.dev == NULL) { 213 if (rtas.dev == NULL)
216 PPCDBG(PPCDBG_RTAS,"\tNo rtas device in device-tree...\n");
217 return RTAS_UNKNOWN_SERVICE; 214 return RTAS_UNKNOWN_SERVICE;
218 }
219 tokp = (int *) get_property(rtas.dev, service, NULL); 215 tokp = (int *) get_property(rtas.dev, service, NULL);
220 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; 216 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE;
221} 217}
222 218
219#ifdef CONFIG_RTAS_ERROR_LOGGING
223/* 220/*
224 * Return the firmware-specified size of the error log buffer 221 * Return the firmware-specified size of the error log buffer
225 * for all rtas calls that require an error buffer argument. 222 * for all rtas calls that require an error buffer argument.
@@ -234,31 +231,38 @@ int rtas_get_error_log_max(void)
234 rtas_error_log_max = rtas_token ("rtas-error-log-max"); 231 rtas_error_log_max = rtas_token ("rtas-error-log-max");
235 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) || 232 if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
236 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) { 233 (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
237 printk (KERN_WARNING "RTAS: bad log buffer size %d\n", rtas_error_log_max); 234 printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
235 rtas_error_log_max);
238 rtas_error_log_max = RTAS_ERROR_LOG_MAX; 236 rtas_error_log_max = RTAS_ERROR_LOG_MAX;
239 } 237 }
240 return rtas_error_log_max; 238 return rtas_error_log_max;
241} 239}
240EXPORT_SYMBOL(rtas_get_error_log_max);
242 241
243 242
243char rtas_err_buf[RTAS_ERROR_LOG_MAX];
244int rtas_last_error_token;
245
244/** Return a copy of the detailed error text associated with the 246/** Return a copy of the detailed error text associated with the
245 * most recent failed call to rtas. Because the error text 247 * most recent failed call to rtas. Because the error text
246 * might go stale if there are any other intervening rtas calls, 248 * might go stale if there are any other intervening rtas calls,
247 * this routine must be called atomically with whatever produced 249 * this routine must be called atomically with whatever produced
248 * the error (i.e. with rtas.lock still held from the previous call). 250 * the error (i.e. with rtas.lock still held from the previous call).
249 */ 251 */
250static int 252static char *__fetch_rtas_last_error(char *altbuf)
251__fetch_rtas_last_error(void)
252{ 253{
253 struct rtas_args err_args, save_args; 254 struct rtas_args err_args, save_args;
254 u32 bufsz; 255 u32 bufsz;
256 char *buf = NULL;
257
258 if (rtas_last_error_token == -1)
259 return NULL;
255 260
256 bufsz = rtas_get_error_log_max(); 261 bufsz = rtas_get_error_log_max();
257 262
258 err_args.token = rtas_token("rtas-last-error"); 263 err_args.token = rtas_last_error_token;
259 err_args.nargs = 2; 264 err_args.nargs = 2;
260 err_args.nret = 1; 265 err_args.nret = 1;
261
262 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf); 266 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf);
263 err_args.args[1] = bufsz; 267 err_args.args[1] = bufsz;
264 err_args.args[2] = 0; 268 err_args.args[2] = 0;
@@ -271,23 +275,38 @@ __fetch_rtas_last_error(void)
271 err_args = rtas.args; 275 err_args = rtas.args;
272 rtas.args = save_args; 276 rtas.args = save_args;
273 277
274 return err_args.args[2]; 278 /* Log the error in the unlikely case that there was one. */
279 if (unlikely(err_args.args[2] == 0)) {
280 if (altbuf) {
281 buf = altbuf;
282 } else {
283 buf = rtas_err_buf;
284 if (mem_init_done)
285 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
286 }
287 if (buf)
288 memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
289 }
290
291 return buf;
275} 292}
276 293
294#define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
295
296#else /* CONFIG_RTAS_ERROR_LOGGING */
297#define __fetch_rtas_last_error(x) NULL
298#define get_errorlog_buffer() NULL
299#endif
300
277int rtas_call(int token, int nargs, int nret, int *outputs, ...) 301int rtas_call(int token, int nargs, int nret, int *outputs, ...)
278{ 302{
279 va_list list; 303 va_list list;
280 int i, logit = 0; 304 int i;
281 unsigned long s; 305 unsigned long s;
282 struct rtas_args *rtas_args; 306 struct rtas_args *rtas_args;
283 char * buff_copy = NULL; 307 char *buff_copy = NULL;
284 int ret; 308 int ret;
285 309
286 PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
287 PPCDBG(PPCDBG_RTAS, "\ttoken = 0x%x\n", token);
288 PPCDBG(PPCDBG_RTAS, "\tnargs = %d\n", nargs);
289 PPCDBG(PPCDBG_RTAS, "\tnret = %d\n", nret);
290 PPCDBG(PPCDBG_RTAS, "\t&outputs = 0x%lx\n", outputs);
291 if (token == RTAS_UNKNOWN_SERVICE) 310 if (token == RTAS_UNKNOWN_SERVICE)
292 return -1; 311 return -1;
293 312
@@ -300,46 +319,25 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
300 rtas_args->nret = nret; 319 rtas_args->nret = nret;
301 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]); 320 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]);
302 va_start(list, outputs); 321 va_start(list, outputs);
303 for (i = 0; i < nargs; ++i) { 322 for (i = 0; i < nargs; ++i)
304 rtas_args->args[i] = va_arg(list, rtas_arg_t); 323 rtas_args->args[i] = va_arg(list, rtas_arg_t);
305 PPCDBG(PPCDBG_RTAS, "\tnarg[%d] = 0x%x\n", i, rtas_args->args[i]);
306 }
307 va_end(list); 324 va_end(list);
308 325
309 for (i = 0; i < nret; ++i) 326 for (i = 0; i < nret; ++i)
310 rtas_args->rets[i] = 0; 327 rtas_args->rets[i] = 0;
311 328
312 PPCDBG(PPCDBG_RTAS, "\tentering rtas with 0x%lx\n",
313 __pa(rtas_args));
314 enter_rtas(__pa(rtas_args)); 329 enter_rtas(__pa(rtas_args));
315 PPCDBG(PPCDBG_RTAS, "\treturned from rtas ...\n");
316 330
317 /* A -1 return code indicates that the last command couldn't 331 /* A -1 return code indicates that the last command couldn't
318 be completed due to a hardware error. */ 332 be completed due to a hardware error. */
319 if (rtas_args->rets[0] == -1) 333 if (rtas_args->rets[0] == -1)
320 logit = (__fetch_rtas_last_error() == 0); 334 buff_copy = __fetch_rtas_last_error(NULL);
321
322 ifppcdebug(PPCDBG_RTAS) {
323 for(i=0; i < nret ;i++)
324 udbg_printf("\tnret[%d] = 0x%lx\n", i, (ulong)rtas_args->rets[i]);
325 }
326 335
327 if (nret > 1 && outputs != NULL) 336 if (nret > 1 && outputs != NULL)
328 for (i = 0; i < nret-1; ++i) 337 for (i = 0; i < nret-1; ++i)
329 outputs[i] = rtas_args->rets[i+1]; 338 outputs[i] = rtas_args->rets[i+1];
330 ret = (nret > 0)? rtas_args->rets[0]: 0; 339 ret = (nret > 0)? rtas_args->rets[0]: 0;
331 340
332 /* Log the error in the unlikely case that there was one. */
333 if (unlikely(logit)) {
334 buff_copy = rtas_err_buf;
335 if (mem_init_done) {
336 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
337 if (buff_copy)
338 memcpy(buff_copy, rtas_err_buf,
339 RTAS_ERROR_LOG_MAX);
340 }
341 }
342
343 /* Gotta do something different here, use global lock for now... */ 341 /* Gotta do something different here, use global lock for now... */
344 spin_unlock_irqrestore(&rtas.lock, s); 342 spin_unlock_irqrestore(&rtas.lock, s);
345 343
@@ -354,8 +352,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
354/* Given an RTAS status code of 990n compute the hinted delay of 10^n 352/* Given an RTAS status code of 990n compute the hinted delay of 10^n
355 * (last digit) milliseconds. For now we bound at n=5 (100 sec). 353 * (last digit) milliseconds. For now we bound at n=5 (100 sec).
356 */ 354 */
357unsigned int 355unsigned int rtas_extended_busy_delay_time(int status)
358rtas_extended_busy_delay_time(int status)
359{ 356{
360 int order = status - 9900; 357 int order = status - 9900;
361 unsigned long ms; 358 unsigned long ms;
@@ -366,7 +363,7 @@ rtas_extended_busy_delay_time(int status)
366 order = 5; /* bound */ 363 order = 5; /* bound */
367 364
368 /* Use microseconds for reasonable accuracy */ 365 /* Use microseconds for reasonable accuracy */
369 for (ms=1; order > 0; order--) 366 for (ms = 1; order > 0; order--)
370 ms *= 10; 367 ms *= 10;
371 368
372 return ms; 369 return ms;
@@ -493,112 +490,23 @@ int rtas_set_indicator(int indicator, int index, int new_value)
493 return rc; 490 return rc;
494} 491}
495 492
496#define FLASH_BLOCK_LIST_VERSION (1UL) 493void rtas_restart(char *cmd)
497static void
498rtas_flash_firmware(void)
499{
500 unsigned long image_size;
501 struct flash_block_list *f, *next, *flist;
502 unsigned long rtas_block_list;
503 int i, status, update_token;
504
505 update_token = rtas_token("ibm,update-flash-64-and-reboot");
506 if (update_token == RTAS_UNKNOWN_SERVICE) {
507 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
508 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
509 return;
510 }
511
512 /* NOTE: the "first" block list is a global var with no data
513 * blocks in the kernel data segment. We do this because
514 * we want to ensure this block_list addr is under 4GB.
515 */
516 rtas_firmware_flash_list.num_blocks = 0;
517 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
518 rtas_block_list = virt_to_abs(flist);
519 if (rtas_block_list >= 4UL*1024*1024*1024) {
520 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
521 return;
522 }
523
524 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
525 /* Update the block_list in place. */
526 image_size = 0;
527 for (f = flist; f; f = next) {
528 /* Translate data addrs to absolute */
529 for (i = 0; i < f->num_blocks; i++) {
530 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
531 image_size += f->blocks[i].length;
532 }
533 next = f->next;
534 /* Don't translate NULL pointer for last entry */
535 if (f->next)
536 f->next = (struct flash_block_list *)virt_to_abs(f->next);
537 else
538 f->next = NULL;
539 /* make num_blocks into the version/length field */
540 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
541 }
542
543 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
544 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
545 rtas_progress("Flashing \n", 0x0);
546 rtas_progress("Please Wait... ", 0x0);
547 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
548 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
549 switch (status) { /* should only get "bad" status */
550 case 0:
551 printk(KERN_ALERT "FLASH: success\n");
552 break;
553 case -1:
554 printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
555 break;
556 case -3:
557 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
558 break;
559 case -4:
560 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
561 break;
562 default:
563 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
564 break;
565 }
566}
567
568void rtas_flash_bypass_warning(void)
569{
570 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
571 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
572}
573
574
575void
576rtas_restart(char *cmd)
577{ 494{
578 if (rtas_firmware_flash_list.next)
579 rtas_flash_firmware();
580
581 printk("RTAS system-reboot returned %d\n", 495 printk("RTAS system-reboot returned %d\n",
582 rtas_call(rtas_token("system-reboot"), 0, 1, NULL)); 496 rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
583 for (;;); 497 for (;;);
584} 498}
585 499
586void 500void rtas_power_off(void)
587rtas_power_off(void)
588{ 501{
589 if (rtas_firmware_flash_list.next)
590 rtas_flash_bypass_warning();
591 /* allow power on only with power button press */ 502 /* allow power on only with power button press */
592 printk("RTAS power-off returned %d\n", 503 printk("RTAS power-off returned %d\n",
593 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1)); 504 rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
594 for (;;); 505 for (;;);
595} 506}
596 507
597void 508void rtas_halt(void)
598rtas_halt(void)
599{ 509{
600 if (rtas_firmware_flash_list.next)
601 rtas_flash_bypass_warning();
602 rtas_power_off(); 510 rtas_power_off();
603} 511}
604 512
@@ -631,9 +539,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
631{ 539{
632 struct rtas_args args; 540 struct rtas_args args;
633 unsigned long flags; 541 unsigned long flags;
634 char * buff_copy; 542 char *buff_copy, *errbuf = NULL;
635 int nargs; 543 int nargs;
636 int err_rc = 0;
637 544
638 if (!capable(CAP_SYS_ADMIN)) 545 if (!capable(CAP_SYS_ADMIN))
639 return -EPERM; 546 return -EPERM;
@@ -652,7 +559,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
652 nargs * sizeof(rtas_arg_t)) != 0) 559 nargs * sizeof(rtas_arg_t)) != 0)
653 return -EFAULT; 560 return -EFAULT;
654 561
655 buff_copy = kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL); 562 buff_copy = get_errorlog_buffer();
656 563
657 spin_lock_irqsave(&rtas.lock, flags); 564 spin_lock_irqsave(&rtas.lock, flags);
658 565
@@ -664,19 +571,14 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
664 571
665 /* A -1 return code indicates that the last command couldn't 572 /* A -1 return code indicates that the last command couldn't
666 be completed due to a hardware error. */ 573 be completed due to a hardware error. */
667 if (args.rets[0] == -1) { 574 if (args.rets[0] == -1)
668 err_rc = __fetch_rtas_last_error(); 575 errbuf = __fetch_rtas_last_error(buff_copy);
669 if ((err_rc == 0) && buff_copy) {
670 memcpy(buff_copy, rtas_err_buf, RTAS_ERROR_LOG_MAX);
671 }
672 }
673 576
674 spin_unlock_irqrestore(&rtas.lock, flags); 577 spin_unlock_irqrestore(&rtas.lock, flags);
675 578
676 if (buff_copy) { 579 if (buff_copy) {
677 if ((args.rets[0] == -1) && (err_rc == 0)) { 580 if (errbuf)
678 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); 581 log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
679 }
680 kfree(buff_copy); 582 kfree(buff_copy);
681 } 583 }
682 584
@@ -689,6 +591,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
689 return 0; 591 return 0;
690} 592}
691 593
594#ifdef CONFIG_SMP
692/* This version can't take the spinlock, because it never returns */ 595/* This version can't take the spinlock, because it never returns */
693 596
694struct rtas_args rtas_stop_self_args = { 597struct rtas_args rtas_stop_self_args = {
@@ -713,6 +616,7 @@ void rtas_stop_self(void)
713 616
714 panic("Alas, I survived.\n"); 617 panic("Alas, I survived.\n");
715} 618}
619#endif
716 620
717/* 621/*
718 * Call early during boot, before mem init or bootmem, to retreive the RTAS 622 * Call early during boot, before mem init or bootmem, to retreive the RTAS
@@ -721,6 +625,8 @@ void rtas_stop_self(void)
721 */ 625 */
722void __init rtas_initialize(void) 626void __init rtas_initialize(void)
723{ 627{
628 unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
629
724 /* Get RTAS dev node and fill up our "rtas" structure with infos 630 /* Get RTAS dev node and fill up our "rtas" structure with infos
725 * about it. 631 * about it.
726 */ 632 */
@@ -742,26 +648,27 @@ void __init rtas_initialize(void)
742 } else 648 } else
743 rtas.dev = NULL; 649 rtas.dev = NULL;
744 } 650 }
651 if (!rtas.dev)
652 return;
653
745 /* If RTAS was found, allocate the RMO buffer for it and look for 654 /* If RTAS was found, allocate the RMO buffer for it and look for
746 * the stop-self token if any 655 * the stop-self token if any
747 */ 656 */
748 if (rtas.dev) { 657#ifdef CONFIG_PPC64
749 unsigned long rtas_region = RTAS_INSTANTIATE_MAX; 658 if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
750 if (systemcfg->platform == PLATFORM_PSERIES_LPAR) 659 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
751 rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX); 660#endif
752 661 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
753 rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE,
754 rtas_region);
755 662
756#ifdef CONFIG_HOTPLUG_CPU 663#ifdef CONFIG_HOTPLUG_CPU
757 rtas_stop_self_args.token = rtas_token("stop-self"); 664 rtas_stop_self_args.token = rtas_token("stop-self");
758#endif /* CONFIG_HOTPLUG_CPU */ 665#endif /* CONFIG_HOTPLUG_CPU */
759 } 666#ifdef CONFIG_RTAS_ERROR_LOGGING
760 667 rtas_last_error_token = rtas_token("rtas-last-error");
668#endif
761} 669}
762 670
763 671
764EXPORT_SYMBOL(rtas_firmware_flash_list);
765EXPORT_SYMBOL(rtas_token); 672EXPORT_SYMBOL(rtas_token);
766EXPORT_SYMBOL(rtas_call); 673EXPORT_SYMBOL(rtas_call);
767EXPORT_SYMBOL(rtas_data_buf); 674EXPORT_SYMBOL(rtas_data_buf);
@@ -771,4 +678,3 @@ EXPORT_SYMBOL(rtas_get_sensor);
771EXPORT_SYMBOL(rtas_get_power_level); 678EXPORT_SYMBOL(rtas_get_power_level);
772EXPORT_SYMBOL(rtas_set_power_level); 679EXPORT_SYMBOL(rtas_set_power_level);
773EXPORT_SYMBOL(rtas_set_indicator); 680EXPORT_SYMBOL(rtas_set_indicator);
774EXPORT_SYMBOL(rtas_get_error_log_max);
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
new file mode 100644
index 000000000000..2f8c3c951394
--- /dev/null
+++ b/arch/powerpc/kernel/semaphore.c
@@ -0,0 +1,135 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
new file mode 100644
index 000000000000..1292460fcde2
--- /dev/null
+++ b/arch/powerpc/kernel/setup-common.c
@@ -0,0 +1,410 @@
1/*
2 * Common boot and setup code for both 32-bit and 64-bit.
3 * Extracted from arch/powerpc/kernel/setup_64.c.
4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/string.h>
15#include <linux/sched.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/reboot.h>
19#include <linux/delay.h>
20#include <linux/initrd.h>
21#include <linux/ide.h>
22#include <linux/seq_file.h>
23#include <linux/ioport.h>
24#include <linux/console.h>
25#include <linux/utsname.h>
26#include <linux/tty.h>
27#include <linux/root_dev.h>
28#include <linux/notifier.h>
29#include <linux/cpu.h>
30#include <linux/unistd.h>
31#include <linux/serial.h>
32#include <linux/serial_8250.h>
33#include <asm/io.h>
34#include <asm/prom.h>
35#include <asm/processor.h>
36#include <asm/pgtable.h>
37#include <asm/smp.h>
38#include <asm/elf.h>
39#include <asm/machdep.h>
40#include <asm/time.h>
41#include <asm/cputable.h>
42#include <asm/sections.h>
43#include <asm/btext.h>
44#include <asm/nvram.h>
45#include <asm/setup.h>
46#include <asm/system.h>
47#include <asm/rtas.h>
48#include <asm/iommu.h>
49#include <asm/serial.h>
50#include <asm/cache.h>
51#include <asm/page.h>
52#include <asm/mmu.h>
53#include <asm/lmb.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63/*
64 * This still seems to be needed... -- paulus
65 */
66struct screen_info screen_info = {
67 .orig_x = 0,
68 .orig_y = 25,
69 .orig_video_cols = 80,
70 .orig_video_lines = 25,
71 .orig_video_isVGA = 1,
72 .orig_video_points = 16
73};
74
75#ifdef __DO_IRQ_CANON
76/* XXX should go elsewhere eventually */
77int ppc_do_canonicalize_irqs;
78EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
79#endif
80
81/* also used by kexec */
82void machine_shutdown(void)
83{
84 if (ppc_md.nvram_sync)
85 ppc_md.nvram_sync();
86}
87
88void machine_restart(char *cmd)
89{
90 machine_shutdown();
91 ppc_md.restart(cmd);
92#ifdef CONFIG_SMP
93 smp_send_stop();
94#endif
95 printk(KERN_EMERG "System Halted, OK to turn off power\n");
96 local_irq_disable();
97 while (1) ;
98}
99
100void machine_power_off(void)
101{
102 machine_shutdown();
103 ppc_md.power_off();
104#ifdef CONFIG_SMP
105 smp_send_stop();
106#endif
107 printk(KERN_EMERG "System Halted, OK to turn off power\n");
108 local_irq_disable();
109 while (1) ;
110}
111/* Used by the G5 thermal driver */
112EXPORT_SYMBOL_GPL(machine_power_off);
113
114void (*pm_power_off)(void) = machine_power_off;
115EXPORT_SYMBOL_GPL(pm_power_off);
116
117void machine_halt(void)
118{
119 machine_shutdown();
120 ppc_md.halt();
121#ifdef CONFIG_SMP
122 smp_send_stop();
123#endif
124 printk(KERN_EMERG "System Halted, OK to turn off power\n");
125 local_irq_disable();
126 while (1) ;
127}
128
129
130#ifdef CONFIG_TAU
131extern u32 cpu_temp(unsigned long cpu);
132extern u32 cpu_temp_both(unsigned long cpu);
133#endif /* CONFIG_TAU */
134
135#ifdef CONFIG_SMP
136DEFINE_PER_CPU(unsigned int, pvr);
137#endif
138
139static int show_cpuinfo(struct seq_file *m, void *v)
140{
141 unsigned long cpu_id = (unsigned long)v - 1;
142 unsigned int pvr;
143 unsigned short maj;
144 unsigned short min;
145
146 if (cpu_id == NR_CPUS) {
147#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
148 unsigned long bogosum = 0;
149 int i;
150 for (i = 0; i < NR_CPUS; ++i)
151 if (cpu_online(i))
152 bogosum += loops_per_jiffy;
153 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
154 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
155#endif /* CONFIG_SMP && CONFIG_PPC32 */
156 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
157
158 if (ppc_md.show_cpuinfo != NULL)
159 ppc_md.show_cpuinfo(m);
160
161 return 0;
162 }
163
164 /* We only show online cpus: disable preempt (overzealous, I
165 * knew) to prevent cpu going down. */
166 preempt_disable();
167 if (!cpu_online(cpu_id)) {
168 preempt_enable();
169 return 0;
170 }
171
172#ifdef CONFIG_SMP
173#ifdef CONFIG_PPC64 /* XXX for now */
174 pvr = per_cpu(pvr, cpu_id);
175#else
176 pvr = cpu_data[cpu_id].pvr;
177#endif
178#else
179 pvr = mfspr(SPRN_PVR);
180#endif
181 maj = (pvr >> 8) & 0xFF;
182 min = pvr & 0xFF;
183
184 seq_printf(m, "processor\t: %lu\n", cpu_id);
185 seq_printf(m, "cpu\t\t: ");
186
187 if (cur_cpu_spec->pvr_mask)
188 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
189 else
190 seq_printf(m, "unknown (%08x)", pvr);
191
192#ifdef CONFIG_ALTIVEC
193 if (cpu_has_feature(CPU_FTR_ALTIVEC))
194 seq_printf(m, ", altivec supported");
195#endif /* CONFIG_ALTIVEC */
196
197 seq_printf(m, "\n");
198
199#ifdef CONFIG_TAU
200 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
201#ifdef CONFIG_TAU_AVERAGE
202 /* more straightforward, but potentially misleading */
203 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
204 cpu_temp(i));
205#else
206 /* show the actual temp sensor range */
207 u32 temp;
208 temp = cpu_temp_both(i);
209 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
210 temp & 0xff, temp >> 16);
211#endif
212 }
213#endif /* CONFIG_TAU */
214
215 /*
216 * Assume here that all clock rates are the same in a
217 * smp system. -- Cort
218 */
219 if (ppc_proc_freq)
220 seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
221 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
222
223 if (ppc_md.show_percpuinfo != NULL)
224 ppc_md.show_percpuinfo(m, cpu_id);
225
226 /* If we are a Freescale core do a simple check so
227 * we dont have to keep adding cases in the future */
228 if (PVR_VER(pvr) & 0x8000) {
229 maj = PVR_MAJ(pvr);
230 min = PVR_MIN(pvr);
231 } else {
232 switch (PVR_VER(pvr)) {
233 case 0x0020: /* 403 family */
234 maj = PVR_MAJ(pvr) + 1;
235 min = PVR_MIN(pvr);
236 break;
237 case 0x1008: /* 740P/750P ?? */
238 maj = ((pvr >> 8) & 0xFF) - 1;
239 min = pvr & 0xFF;
240 break;
241 default:
242 maj = (pvr >> 8) & 0xFF;
243 min = pvr & 0xFF;
244 break;
245 }
246 }
247
248 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
249 maj, min, PVR_VER(pvr), PVR_REV(pvr));
250
251#ifdef CONFIG_PPC32
252 seq_printf(m, "bogomips\t: %lu.%02lu\n",
253 loops_per_jiffy / (500000/HZ),
254 (loops_per_jiffy / (5000/HZ)) % 100);
255#endif
256
257#ifdef CONFIG_SMP
258 seq_printf(m, "\n");
259#endif
260
261 preempt_enable();
262 return 0;
263}
264
265static void *c_start(struct seq_file *m, loff_t *pos)
266{
267 unsigned long i = *pos;
268
269 return i <= NR_CPUS ? (void *)(i + 1) : NULL;
270}
271
272static void *c_next(struct seq_file *m, void *v, loff_t *pos)
273{
274 ++*pos;
275 return c_start(m, pos);
276}
277
278static void c_stop(struct seq_file *m, void *v)
279{
280}
281
282struct seq_operations cpuinfo_op = {
283 .start =c_start,
284 .next = c_next,
285 .stop = c_stop,
286 .show = show_cpuinfo,
287};
288
289#ifdef CONFIG_PPC_MULTIPLATFORM
290static int __init set_preferred_console(void)
291{
292 struct device_node *prom_stdout = NULL;
293 char *name;
294 u32 *spd;
295 int offset = 0;
296
297 DBG(" -> set_preferred_console()\n");
298
299 /* The user has requested a console so this is already set up. */
300 if (strstr(saved_command_line, "console=")) {
301 DBG(" console was specified !\n");
302 return -EBUSY;
303 }
304
305 if (!of_chosen) {
306 DBG(" of_chosen is NULL !\n");
307 return -ENODEV;
308 }
309 /* We are getting a weird phandle from OF ... */
310 /* ... So use the full path instead */
311 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
312 if (name == NULL) {
313 DBG(" no linux,stdout-path !\n");
314 return -ENODEV;
315 }
316 prom_stdout = of_find_node_by_path(name);
317 if (!prom_stdout) {
318 DBG(" can't find stdout package %s !\n", name);
319 return -ENODEV;
320 }
321 DBG("stdout is %s\n", prom_stdout->full_name);
322
323 name = (char *)get_property(prom_stdout, "name", NULL);
324 if (!name) {
325 DBG(" stdout package has no name !\n");
326 goto not_found;
327 }
328 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
329
330 if (0)
331 ;
332#ifdef CONFIG_SERIAL_8250_CONSOLE
333 else if (strcmp(name, "serial") == 0) {
334 int i;
335 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
336 if (i > 8) {
337 switch (reg[1]) {
338 case 0x3f8:
339 offset = 0;
340 break;
341 case 0x2f8:
342 offset = 1;
343 break;
344 case 0x898:
345 offset = 2;
346 break;
347 case 0x890:
348 offset = 3;
349 break;
350 default:
351 /* We dont recognise the serial port */
352 goto not_found;
353 }
354 }
355 }
356#endif /* CONFIG_SERIAL_8250_CONSOLE */
357#ifdef CONFIG_PPC_PSERIES
358 else if (strcmp(name, "vty") == 0) {
359 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
360 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
361
362 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
363 /* Host Virtual Serial Interface */
364 switch (reg[0]) {
365 case 0x30000000:
366 offset = 0;
367 break;
368 case 0x30000001:
369 offset = 1;
370 break;
371 default:
372 goto not_found;
373 }
374 of_node_put(prom_stdout);
375 DBG("Found hvsi console at offset %d\n", offset);
376 return add_preferred_console("hvsi", offset, NULL);
377 } else {
378 /* pSeries LPAR virtual console */
379 of_node_put(prom_stdout);
380 DBG("Found hvc console\n");
381 return add_preferred_console("hvc", 0, NULL);
382 }
383 }
384#endif /* CONFIG_PPC_PSERIES */
385#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
386 else if (strcmp(name, "ch-a") == 0)
387 offset = 0;
388 else if (strcmp(name, "ch-b") == 0)
389 offset = 1;
390#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
391 else
392 goto not_found;
393 of_node_put(prom_stdout);
394
395 DBG("Found serial console at ttyS%d\n", offset);
396
397 if (spd) {
398 static char __initdata opt[16];
399 sprintf(opt, "%d", *spd);
400 return add_preferred_console("ttyS", offset, opt);
401 } else
402 return add_preferred_console("ttyS", offset, NULL);
403
404 not_found:
405 DBG("No preferred console found !\n");
406 of_node_put(prom_stdout);
407 return -ENODEV;
408}
409console_initcall(set_preferred_console);
410#endif /* CONFIG_PPC_MULTIPLATFORM */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
new file mode 100644
index 000000000000..9680ae99b084
--- /dev/null
+++ b/arch/powerpc/kernel/setup_32.c
@@ -0,0 +1,372 @@
1/*
2 * Common prep/pmac/chrp boot and setup code.
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <linux/string.h>
8#include <linux/sched.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/reboot.h>
12#include <linux/delay.h>
13#include <linux/initrd.h>
14#include <linux/ide.h>
15#include <linux/tty.h>
16#include <linux/bootmem.h>
17#include <linux/seq_file.h>
18#include <linux/root_dev.h>
19#include <linux/cpu.h>
20#include <linux/console.h>
21
22#include <asm/residual.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/processor.h>
26#include <asm/pgtable.h>
27#include <asm/setup.h>
28#include <asm/amigappc.h>
29#include <asm/smp.h>
30#include <asm/elf.h>
31#include <asm/cputable.h>
32#include <asm/bootx.h>
33#include <asm/btext.h>
34#include <asm/machdep.h>
35#include <asm/uaccess.h>
36#include <asm/system.h>
37#include <asm/pmac_feature.h>
38#include <asm/sections.h>
39#include <asm/nvram.h>
40#include <asm/xmon.h>
41#include <asm/time.h>
42
43#define DBG(fmt...)
44
45#if defined CONFIG_KGDB
46#include <asm/kgdb.h>
47#endif
48
49extern void platform_init(void);
50extern void bootx_init(unsigned long r4, unsigned long phys);
51
52extern void ppc6xx_idle(void);
53extern void power4_idle(void);
54
55boot_infos_t *boot_infos;
56struct ide_machdep_calls ppc_ide_md;
57
58/* XXX should go elsewhere */
59int __irq_offset_value;
60EXPORT_SYMBOL(__irq_offset_value);
61
62int boot_cpuid;
63EXPORT_SYMBOL_GPL(boot_cpuid);
64int boot_cpuid_phys;
65
66unsigned long ISA_DMA_THRESHOLD;
67unsigned int DMA_MODE_READ;
68unsigned int DMA_MODE_WRITE;
69
70int have_of = 1;
71
72#ifdef CONFIG_PPC_MULTIPLATFORM
73int _machine = 0;
74
75extern void prep_init(void);
76extern void pmac_init(void);
77extern void chrp_init(void);
78
79dev_t boot_dev;
80#endif /* CONFIG_PPC_MULTIPLATFORM */
81
82#ifdef CONFIG_MAGIC_SYSRQ
83unsigned long SYSRQ_KEY = 0x54;
84#endif /* CONFIG_MAGIC_SYSRQ */
85
86#ifdef CONFIG_VGA_CONSOLE
87unsigned long vgacon_remap_base;
88#endif
89
90struct machdep_calls ppc_md;
91EXPORT_SYMBOL(ppc_md);
92
93/*
94 * These are used in binfmt_elf.c to put aux entries on the stack
95 * for each elf executable being started.
96 */
97int dcache_bsize;
98int icache_bsize;
99int ucache_bsize;
100
101/*
102 * We're called here very early in the boot. We determine the machine
103 * type and call the appropriate low-level setup functions.
104 * -- Cort <cort@fsmlabs.com>
105 *
106 * Note that the kernel may be running at an address which is different
107 * from the address that it was linked at, so we must use RELOC/PTRRELOC
108 * to access static data (including strings). -- paulus
109 */
110unsigned long __init early_init(unsigned long dt_ptr)
111{
112 unsigned long offset = reloc_offset();
113
114 /* First zero the BSS -- use memset_io, some platforms don't have
115 * caches on yet */
116 memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);
117
118 /*
119 * Identify the CPU type and fix up code sections
120 * that depend on which cpu we have.
121 */
122 identify_cpu(offset, 0);
123 do_cpu_ftr_fixups(offset);
124
125 return KERNELBASE + offset;
126}
127
128#ifdef CONFIG_PPC_MULTIPLATFORM
129/*
130 * The PPC_MULTIPLATFORM version of platform_init...
131 */
132void __init platform_init(void)
133{
134 /* if we didn't get any bootinfo telling us what we are... */
135 if (_machine == 0) {
136 /* prep boot loader tells us if we're prep or not */
137 if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
138 _machine = _MACH_prep;
139 }
140
141#ifdef CONFIG_PPC_PREP
142 /* not much more to do here, if prep */
143 if (_machine == _MACH_prep) {
144 prep_init();
145 return;
146 }
147#endif
148
149#ifdef CONFIG_ADB
150 if (strstr(cmd_line, "adb_sync")) {
151 extern int __adb_probe_sync;
152 __adb_probe_sync = 1;
153 }
154#endif /* CONFIG_ADB */
155
156 switch (_machine) {
157#ifdef CONFIG_PPC_PMAC
158 case _MACH_Pmac:
159 pmac_init();
160 break;
161#endif
162#ifdef CONFIG_PPC_CHRP
163 case _MACH_chrp:
164 chrp_init();
165 break;
166#endif
167 }
168}
169#endif
170
171/*
172 * Find out what kind of machine we're on and save any data we need
173 * from the early boot process (devtree is copied on pmac by prom_init()).
174 * This is called very early on the boot process, after a minimal
175 * MMU environment has been set up but before MMU_init is called.
176 */
177void __init machine_init(unsigned long dt_ptr, unsigned long phys)
178{
179 early_init_devtree(__va(dt_ptr));
180
181#ifdef CONFIG_CMDLINE
182 strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
183#endif /* CONFIG_CMDLINE */
184
185 platform_init();
186
187#ifdef CONFIG_6xx
188 ppc_md.power_save = ppc6xx_idle;
189#endif
190
191 if (ppc_md.progress)
192 ppc_md.progress("id mach(): done", 0x200);
193}
194
195#ifdef CONFIG_BOOKE_WDT
196/* Checks wdt=x and wdt_period=xx command-line option */
197int __init early_parse_wdt(char *p)
198{
199 if (p && strncmp(p, "0", 1) != 0)
200 booke_wdt_enabled = 1;
201
202 return 0;
203}
204early_param("wdt", early_parse_wdt);
205
206int __init early_parse_wdt_period (char *p)
207{
208 if (p)
209 booke_wdt_period = simple_strtoul(p, NULL, 0);
210
211 return 0;
212}
213early_param("wdt_period", early_parse_wdt_period);
214#endif /* CONFIG_BOOKE_WDT */
215
216/* Checks "l2cr=xxxx" command-line option */
217int __init ppc_setup_l2cr(char *str)
218{
219 if (cpu_has_feature(CPU_FTR_L2CR)) {
220 unsigned long val = simple_strtoul(str, NULL, 0);
221 printk(KERN_INFO "l2cr set to %lx\n", val);
222 _set_L2CR(0); /* force invalidate by disable cache */
223 _set_L2CR(val); /* and enable it */
224 }
225 return 1;
226}
227__setup("l2cr=", ppc_setup_l2cr);
228
229#ifdef CONFIG_GENERIC_NVRAM
230
231/* Generic nvram hooks used by drivers/char/gen_nvram.c */
232unsigned char nvram_read_byte(int addr)
233{
234 if (ppc_md.nvram_read_val)
235 return ppc_md.nvram_read_val(addr);
236 return 0xff;
237}
238EXPORT_SYMBOL(nvram_read_byte);
239
240void nvram_write_byte(unsigned char val, int addr)
241{
242 if (ppc_md.nvram_write_val)
243 ppc_md.nvram_write_val(addr, val);
244}
245EXPORT_SYMBOL(nvram_write_byte);
246
247void nvram_sync(void)
248{
249 if (ppc_md.nvram_sync)
250 ppc_md.nvram_sync();
251}
252EXPORT_SYMBOL(nvram_sync);
253
254#endif /* CONFIG_NVRAM */
255
256static struct cpu cpu_devices[NR_CPUS];
257
258int __init ppc_init(void)
259{
260 int i;
261
262 /* clear the progress line */
263 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
264
265 /* register CPU devices */
266 for (i = 0; i < NR_CPUS; i++)
267 if (cpu_possible(i))
268 register_cpu(&cpu_devices[i], i, NULL);
269
270 /* call platform init */
271 if (ppc_md.init != NULL) {
272 ppc_md.init();
273 }
274 return 0;
275}
276
277arch_initcall(ppc_init);
278
279/* Warning, IO base is not yet inited */
280void __init setup_arch(char **cmdline_p)
281{
282 extern char *klimit;
283 extern void do_init_bootmem(void);
284
285 /* so udelay does something sensible, assume <= 1000 bogomips */
286 loops_per_jiffy = 500000000 / HZ;
287
288 unflatten_device_tree();
289 finish_device_tree();
290
291#ifdef CONFIG_BOOTX_TEXT
292 init_boot_display();
293#endif
294
295#ifdef CONFIG_PPC_PMAC
296 /* This could be called "early setup arch", it must be done
297 * now because xmon need it
298 */
299 if (_machine == _MACH_Pmac)
300 pmac_feature_init(); /* New cool way */
301#endif
302
303#ifdef CONFIG_XMON
304 xmon_map_scc();
305 if (strstr(cmd_line, "xmon")) {
306 xmon_init(1);
307 debugger(NULL);
308 }
309#endif /* CONFIG_XMON */
310 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
311
312#if defined(CONFIG_KGDB)
313 if (ppc_md.kgdb_map_scc)
314 ppc_md.kgdb_map_scc();
315 set_debug_traps();
316 if (strstr(cmd_line, "gdb")) {
317 if (ppc_md.progress)
318 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
319 printk("kgdb breakpoint activated\n");
320 breakpoint();
321 }
322#endif
323
324 /*
325 * Set cache line size based on type of cpu as a default.
326 * Systems with OF can look in the properties on the cpu node(s)
327 * for a possibly more accurate value.
328 */
329 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
330 dcache_bsize = cur_cpu_spec->dcache_bsize;
331 icache_bsize = cur_cpu_spec->icache_bsize;
332 ucache_bsize = 0;
333 } else
334 ucache_bsize = dcache_bsize = icache_bsize
335 = cur_cpu_spec->dcache_bsize;
336
337 /* reboot on panic */
338 panic_timeout = 180;
339
340 init_mm.start_code = PAGE_OFFSET;
341 init_mm.end_code = (unsigned long) _etext;
342 init_mm.end_data = (unsigned long) _edata;
343 init_mm.brk = (unsigned long) klimit;
344
345 /* Save unparsed command line copy for /proc/cmdline */
346 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
347 *cmdline_p = cmd_line;
348
349 parse_early_param();
350
351 /* set up the bootmem stuff with available memory */
352 do_init_bootmem();
353 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
354
355#ifdef CONFIG_PPC_OCP
356 /* Initialize OCP device list */
357 ocp_early_init();
358 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
359#endif
360
361#ifdef CONFIG_DUMMY_CONSOLE
362 conswitchp = &dummy_con;
363#endif
364
365 ppc_md.setup_arch();
366 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
367
368 paging_init();
369
370 /* this is for modules since _machine can be a define -- Cort */
371 ppc_md.ppc_machine = _machine;
372}
diff --git a/arch/ppc64/kernel/setup.c b/arch/powerpc/kernel/setup_64.c
index 5ac48bd64891..40c48100bf1b 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,7 +37,6 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/bootinfo.h>
41#include <asm/smp.h> 40#include <asm/smp.h>
42#include <asm/elf.h> 41#include <asm/elf.h>
43#include <asm/machdep.h> 42#include <asm/machdep.h>
@@ -58,6 +57,9 @@
58#include <asm/mmu.h> 57#include <asm/mmu.h>
59#include <asm/lmb.h> 58#include <asm/lmb.h>
60#include <asm/iSeries/ItLpNaca.h> 59#include <asm/iSeries/ItLpNaca.h>
60#include <asm/firmware.h>
61#include <asm/systemcfg.h>
62#include <asm/xmon.h>
61 63
62#ifdef DEBUG 64#ifdef DEBUG
63#define DBG(fmt...) udbg_printf(fmt) 65#define DBG(fmt...) udbg_printf(fmt)
@@ -136,24 +138,7 @@ static struct notifier_block ppc64_panic_block = {
136 .priority = INT_MIN /* may not return; must be done last */ 138 .priority = INT_MIN /* may not return; must be done last */
137}; 139};
138 140
139/* 141#ifdef CONFIG_SMP
140 * Perhaps we can put the pmac screen_info[] here
141 * on pmac as well so we don't need the ifdef's.
142 * Until we get multiple-console support in here
143 * that is. -- Cort
144 * Maybe tie it to serial consoles, since this is really what
145 * these processors use on existing boards. -- Dan
146 */
147struct screen_info screen_info = {
148 .orig_x = 0,
149 .orig_y = 25,
150 .orig_video_cols = 80,
151 .orig_video_lines = 25,
152 .orig_video_isVGA = 1,
153 .orig_video_points = 16
154};
155
156#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
157 142
158static int smt_enabled_cmdline; 143static int smt_enabled_cmdline;
159 144
@@ -306,15 +291,13 @@ static void __init setup_cpu_maps(void)
306 291
307 systemcfg->processorCount = num_present_cpus(); 292 systemcfg->processorCount = num_present_cpus();
308} 293}
309#endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */ 294#endif /* CONFIG_SMP */
310
311
312#ifdef CONFIG_PPC_MULTIPLATFORM
313 295
314extern struct machdep_calls pSeries_md; 296extern struct machdep_calls pSeries_md;
315extern struct machdep_calls pmac_md; 297extern struct machdep_calls pmac_md;
316extern struct machdep_calls maple_md; 298extern struct machdep_calls maple_md;
317extern struct machdep_calls bpa_md; 299extern struct machdep_calls bpa_md;
300extern struct machdep_calls iseries_md;
318 301
319/* Ultimately, stuff them in an elf section like initcalls... */ 302/* Ultimately, stuff them in an elf section like initcalls... */
320static struct machdep_calls __initdata *machines[] = { 303static struct machdep_calls __initdata *machines[] = {
@@ -330,6 +313,9 @@ static struct machdep_calls __initdata *machines[] = {
330#ifdef CONFIG_PPC_BPA 313#ifdef CONFIG_PPC_BPA
331 &bpa_md, 314 &bpa_md,
332#endif 315#endif
316#ifdef CONFIG_PPC_ISERIES
317 &iseries_md,
318#endif
333 NULL 319 NULL
334}; 320};
335 321
@@ -401,7 +387,8 @@ void __init early_setup(unsigned long dt_ptr)
401 /* 387 /*
402 * Initialize stab / SLB management 388 * Initialize stab / SLB management
403 */ 389 */
404 stab_initialize(lpaca->stab_real); 390 if (!firmware_has_feature(FW_FEATURE_ISERIES))
391 stab_initialize(lpaca->stab_real);
405 392
406 /* 393 /*
407 * Initialize the MMU Hash table and create the linear mapping 394 * Initialize the MMU Hash table and create the linear mapping
@@ -532,8 +519,6 @@ static void __init check_for_initrd(void)
532#endif /* CONFIG_BLK_DEV_INITRD */ 519#endif /* CONFIG_BLK_DEV_INITRD */
533} 520}
534 521
535#endif /* CONFIG_PPC_MULTIPLATFORM */
536
537/* 522/*
538 * Do some initial setup of the system. The parameters are those which 523 * Do some initial setup of the system. The parameters are those which
539 * were passed in from the bootloader. 524 * were passed in from the bootloader.
@@ -542,14 +527,6 @@ void __init setup_system(void)
542{ 527{
543 DBG(" -> setup_system()\n"); 528 DBG(" -> setup_system()\n");
544 529
545#ifdef CONFIG_PPC_ISERIES
546 /* pSeries systems are identified in prom.c via OF. */
547 if (itLpNaca.xLparInstalled == 1)
548 systemcfg->platform = PLATFORM_ISERIES_LPAR;
549
550 ppc_md.init_early();
551#else /* CONFIG_PPC_ISERIES */
552
553 /* 530 /*
554 * Unflatten the device-tree passed by prom_init or kexec 531 * Unflatten the device-tree passed by prom_init or kexec
555 */ 532 */
@@ -592,6 +569,10 @@ void __init setup_system(void)
592 */ 569 */
593 finish_device_tree(); 570 finish_device_tree();
594 571
572#ifdef CONFIG_BOOTX_TEXT
573 init_boot_display();
574#endif
575
595 /* 576 /*
596 * Initialize xmon 577 * Initialize xmon
597 */ 578 */
@@ -607,9 +588,8 @@ void __init setup_system(void)
607 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 588 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
608 589
609 parse_early_param(); 590 parse_early_param();
610#endif /* !CONFIG_PPC_ISERIES */
611 591
612#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) 592#ifdef CONFIG_SMP
613 /* 593 /*
614 * iSeries has already initialized the cpu maps at this point. 594 * iSeries has already initialized the cpu maps at this point.
615 */ 595 */
@@ -619,7 +599,7 @@ void __init setup_system(void)
619 * we can map physical -> logical CPU ids 599 * we can map physical -> logical CPU ids
620 */ 600 */
621 smp_release_cpus(); 601 smp_release_cpus();
622#endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */ 602#endif
623 603
624 printk("Starting Linux PPC64 %s\n", system_utsname.version); 604 printk("Starting Linux PPC64 %s\n", system_utsname.version);
625 605
@@ -644,51 +624,6 @@ void __init setup_system(void)
644 DBG(" <- setup_system()\n"); 624 DBG(" <- setup_system()\n");
645} 625}
646 626
647/* also used by kexec */
648void machine_shutdown(void)
649{
650 if (ppc_md.nvram_sync)
651 ppc_md.nvram_sync();
652}
653
654void machine_restart(char *cmd)
655{
656 machine_shutdown();
657 ppc_md.restart(cmd);
658#ifdef CONFIG_SMP
659 smp_send_stop();
660#endif
661 printk(KERN_EMERG "System Halted, OK to turn off power\n");
662 local_irq_disable();
663 while (1) ;
664}
665
666void machine_power_off(void)
667{
668 machine_shutdown();
669 ppc_md.power_off();
670#ifdef CONFIG_SMP
671 smp_send_stop();
672#endif
673 printk(KERN_EMERG "System Halted, OK to turn off power\n");
674 local_irq_disable();
675 while (1) ;
676}
677/* Used by the G5 thermal driver */
678EXPORT_SYMBOL_GPL(machine_power_off);
679
680void machine_halt(void)
681{
682 machine_shutdown();
683 ppc_md.halt();
684#ifdef CONFIG_SMP
685 smp_send_stop();
686#endif
687 printk(KERN_EMERG "System Halted, OK to turn off power\n");
688 local_irq_disable();
689 while (1) ;
690}
691
692static int ppc64_panic_event(struct notifier_block *this, 627static int ppc64_panic_event(struct notifier_block *this,
693 unsigned long event, void *ptr) 628 unsigned long event, void *ptr)
694{ 629{
@@ -696,99 +631,6 @@ static int ppc64_panic_event(struct notifier_block *this,
696 return NOTIFY_DONE; 631 return NOTIFY_DONE;
697} 632}
698 633
699
700#ifdef CONFIG_SMP
701DEFINE_PER_CPU(unsigned int, pvr);
702#endif
703
704static int show_cpuinfo(struct seq_file *m, void *v)
705{
706 unsigned long cpu_id = (unsigned long)v - 1;
707 unsigned int pvr;
708 unsigned short maj;
709 unsigned short min;
710
711 if (cpu_id == NR_CPUS) {
712 seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
713
714 if (ppc_md.get_cpuinfo != NULL)
715 ppc_md.get_cpuinfo(m);
716
717 return 0;
718 }
719
720 /* We only show online cpus: disable preempt (overzealous, I
721 * knew) to prevent cpu going down. */
722 preempt_disable();
723 if (!cpu_online(cpu_id)) {
724 preempt_enable();
725 return 0;
726 }
727
728#ifdef CONFIG_SMP
729 pvr = per_cpu(pvr, cpu_id);
730#else
731 pvr = mfspr(SPRN_PVR);
732#endif
733 maj = (pvr >> 8) & 0xFF;
734 min = pvr & 0xFF;
735
736 seq_printf(m, "processor\t: %lu\n", cpu_id);
737 seq_printf(m, "cpu\t\t: ");
738
739 if (cur_cpu_spec->pvr_mask)
740 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
741 else
742 seq_printf(m, "unknown (%08x)", pvr);
743
744#ifdef CONFIG_ALTIVEC
745 if (cpu_has_feature(CPU_FTR_ALTIVEC))
746 seq_printf(m, ", altivec supported");
747#endif /* CONFIG_ALTIVEC */
748
749 seq_printf(m, "\n");
750
751 /*
752 * Assume here that all clock rates are the same in a
753 * smp system. -- Cort
754 */
755 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
756 ppc_proc_freq % 1000000);
757
758 seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);
759
760 preempt_enable();
761 return 0;
762}
763
764static void *c_start(struct seq_file *m, loff_t *pos)
765{
766 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
767}
768static void *c_next(struct seq_file *m, void *v, loff_t *pos)
769{
770 ++*pos;
771 return c_start(m, pos);
772}
773static void c_stop(struct seq_file *m, void *v)
774{
775}
776struct seq_operations cpuinfo_op = {
777 .start =c_start,
778 .next = c_next,
779 .stop = c_stop,
780 .show = show_cpuinfo,
781};
782
783/*
784 * These three variables are used to save values passed to us by prom_init()
785 * via the device tree. The TCE variables are needed because with a memory_limit
786 * in force we may need to explicitly map the TCE are at the top of RAM.
787 */
788unsigned long memory_limit;
789unsigned long tce_alloc_start;
790unsigned long tce_alloc_end;
791
792#ifdef CONFIG_PPC_ISERIES 634#ifdef CONFIG_PPC_ISERIES
793/* 635/*
794 * On iSeries we just parse the mem=X option from the command line. 636 * On iSeries we just parse the mem=X option from the command line.
@@ -806,130 +648,6 @@ static int __init early_parsemem(char *p)
806early_param("mem", early_parsemem); 648early_param("mem", early_parsemem);
807#endif /* CONFIG_PPC_ISERIES */ 649#endif /* CONFIG_PPC_ISERIES */
808 650
809#ifdef CONFIG_PPC_MULTIPLATFORM
810static int __init set_preferred_console(void)
811{
812 struct device_node *prom_stdout = NULL;
813 char *name;
814 u32 *spd;
815 int offset = 0;
816
817 DBG(" -> set_preferred_console()\n");
818
819 /* The user has requested a console so this is already set up. */
820 if (strstr(saved_command_line, "console=")) {
821 DBG(" console was specified !\n");
822 return -EBUSY;
823 }
824
825 if (!of_chosen) {
826 DBG(" of_chosen is NULL !\n");
827 return -ENODEV;
828 }
829 /* We are getting a weird phandle from OF ... */
830 /* ... So use the full path instead */
831 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
832 if (name == NULL) {
833 DBG(" no linux,stdout-path !\n");
834 return -ENODEV;
835 }
836 prom_stdout = of_find_node_by_path(name);
837 if (!prom_stdout) {
838 DBG(" can't find stdout package %s !\n", name);
839 return -ENODEV;
840 }
841 DBG("stdout is %s\n", prom_stdout->full_name);
842
843 name = (char *)get_property(prom_stdout, "name", NULL);
844 if (!name) {
845 DBG(" stdout package has no name !\n");
846 goto not_found;
847 }
848 spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);
849
850 if (0)
851 ;
852#ifdef CONFIG_SERIAL_8250_CONSOLE
853 else if (strcmp(name, "serial") == 0) {
854 int i;
855 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
856 if (i > 8) {
857 switch (reg[1]) {
858 case 0x3f8:
859 offset = 0;
860 break;
861 case 0x2f8:
862 offset = 1;
863 break;
864 case 0x898:
865 offset = 2;
866 break;
867 case 0x890:
868 offset = 3;
869 break;
870 default:
871 /* We dont recognise the serial port */
872 goto not_found;
873 }
874 }
875 }
876#endif /* CONFIG_SERIAL_8250_CONSOLE */
877#ifdef CONFIG_PPC_PSERIES
878 else if (strcmp(name, "vty") == 0) {
879 u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
880 char *compat = (char *)get_property(prom_stdout, "compatible", NULL);
881
882 if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
883 /* Host Virtual Serial Interface */
884 int offset;
885 switch (reg[0]) {
886 case 0x30000000:
887 offset = 0;
888 break;
889 case 0x30000001:
890 offset = 1;
891 break;
892 default:
893 goto not_found;
894 }
895 of_node_put(prom_stdout);
896 DBG("Found hvsi console at offset %d\n", offset);
897 return add_preferred_console("hvsi", offset, NULL);
898 } else {
899 /* pSeries LPAR virtual console */
900 of_node_put(prom_stdout);
901 DBG("Found hvc console\n");
902 return add_preferred_console("hvc", 0, NULL);
903 }
904 }
905#endif /* CONFIG_PPC_PSERIES */
906#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
907 else if (strcmp(name, "ch-a") == 0)
908 offset = 0;
909 else if (strcmp(name, "ch-b") == 0)
910 offset = 1;
911#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
912 else
913 goto not_found;
914 of_node_put(prom_stdout);
915
916 DBG("Found serial console at ttyS%d\n", offset);
917
918 if (spd) {
919 static char __initdata opt[16];
920 sprintf(opt, "%d", *spd);
921 return add_preferred_console("ttyS", offset, opt);
922 } else
923 return add_preferred_console("ttyS", offset, NULL);
924
925 not_found:
926 DBG("No preferred console found !\n");
927 of_node_put(prom_stdout);
928 return -ENODEV;
929}
930console_initcall(set_preferred_console);
931#endif /* CONFIG_PPC_MULTIPLATFORM */
932
933#ifdef CONFIG_IRQSTACKS 651#ifdef CONFIG_IRQSTACKS
934static void __init irqstack_early_init(void) 652static void __init irqstack_early_init(void)
935{ 653{
@@ -983,23 +701,22 @@ void __init setup_syscall_map(void)
983{ 701{
984 unsigned int i, count64 = 0, count32 = 0; 702 unsigned int i, count64 = 0, count32 = 0;
985 extern unsigned long *sys_call_table; 703 extern unsigned long *sys_call_table;
986 extern unsigned long *sys_call_table32;
987 extern unsigned long sys_ni_syscall; 704 extern unsigned long sys_ni_syscall;
988 705
989 706
990 for (i = 0; i < __NR_syscalls; i++) { 707 for (i = 0; i < __NR_syscalls; i++) {
991 if (sys_call_table[i] == sys_ni_syscall) 708 if (sys_call_table[i*2] != sys_ni_syscall) {
992 continue; 709 count64++;
993 count64++; 710 systemcfg->syscall_map_64[i >> 5] |=
994 systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f); 711 0x80000000UL >> (i & 0x1f);
995 } 712 }
996 for (i = 0; i < __NR_syscalls; i++) { 713 if (sys_call_table[i*2+1] != sys_ni_syscall) {
997 if (sys_call_table32[i] == sys_ni_syscall) 714 count32++;
998 continue; 715 systemcfg->syscall_map_32[i >> 5] |=
999 count32++; 716 0x80000000UL >> (i & 0x1f);
1000 systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); 717 }
1001 } 718 }
1002 printk(KERN_INFO "Syscall map setup, %d 32 bits and %d 64 bits syscalls\n", 719 printk(KERN_INFO "Syscall map setup, %d 32-bit and %d 64-bit syscalls\n",
1003 count32, count64); 720 count32, count64);
1004} 721}
1005 722
@@ -1047,6 +764,10 @@ void __init setup_arch(char **cmdline_p)
1047 /* initialize the syscall map in systemcfg */ 764 /* initialize the syscall map in systemcfg */
1048 setup_syscall_map(); 765 setup_syscall_map();
1049 766
767#ifdef CONFIG_DUMMY_CONSOLE
768 conswitchp = &dummy_con;
769#endif
770
1050 ppc_md.setup_arch(); 771 ppc_md.setup_arch();
1051 772
1052 /* Use the default idle loop if the platform hasn't provided one. */ 773 /* Use the default idle loop if the platform hasn't provided one. */
@@ -1091,15 +812,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
1091 printk("[terminate]%04x %s\n", src, msg); 812 printk("[terminate]%04x %s\n", src, msg);
1092} 813}
1093 814
1094/* This should only be called on processor 0 during calibrate decr */
1095void __init setup_default_decr(void)
1096{
1097 struct paca_struct *lpaca = get_paca();
1098
1099 lpaca->default_decr = tb_ticks_per_jiffy;
1100 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
1101}
1102
1103#ifndef CONFIG_PPC_ISERIES 815#ifndef CONFIG_PPC_ISERIES
1104/* 816/*
1105 * This function can be used by platforms to "find" legacy serial ports. 817 * This function can be used by platforms to "find" legacy serial ports.
diff --git a/arch/ppc64/kernel/signal32.c b/arch/powerpc/kernel/signal_32.c
index a8b7a5a56bb4..444c3e81884c 100644
--- a/arch/ppc64/kernel/signal32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1,56 +1,353 @@
1/* 1/*
2 * signal32.c: Support 32bit signal syscalls. 2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Copyright (C) 2001 IBM 6 * Copyright (C) 2001 IBM
5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) 8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
7 * 9 *
8 * These routines maintain argument size conversion between 32bit and 64bit 10 * Derived from "arch/i386/kernel/signal.c"
9 * environment. 11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 * 13 *
11 * This program is free software; you can redistribute it and/or 14 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License 15 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 16 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version. 17 * 2 of the License, or (at your option) any later version.
15 */ 18 */
16 19
17#include <linux/config.h> 20#include <linux/config.h>
18#include <linux/sched.h> 21#include <linux/sched.h>
19#include <linux/mm.h> 22#include <linux/mm.h>
20#include <linux/smp.h> 23#include <linux/smp.h>
21#include <linux/smp_lock.h> 24#include <linux/smp_lock.h>
22#include <linux/kernel.h> 25#include <linux/kernel.h>
23#include <linux/signal.h> 26#include <linux/signal.h>
24#include <linux/syscalls.h>
25#include <linux/errno.h> 27#include <linux/errno.h>
26#include <linux/elf.h> 28#include <linux/elf.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
27#include <linux/compat.h> 31#include <linux/compat.h>
28#include <linux/ptrace.h> 32#include <linux/ptrace.h>
29#include <asm/ppc32.h> 33#else
34#include <linux/wait.h>
35#include <linux/ptrace.h>
36#include <linux/unistd.h>
37#include <linux/stddef.h>
38#include <linux/tty.h>
39#include <linux/binfmts.h>
40#include <linux/suspend.h>
41#endif
42
30#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/cacheflush.h>
45#ifdef CONFIG_PPC64
46#include <asm/ppc32.h>
31#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
32#include <asm/unistd.h> 48#include <asm/unistd.h>
33#include <asm/cacheflush.h>
34#include <asm/vdso.h> 49#include <asm/vdso.h>
50#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
35 54
36#define DEBUG_SIG 0 55#undef DEBUG_SIG
37 56
38#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 57#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
39 58
40#define GP_REGS_SIZE32 min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) 59#ifdef CONFIG_PPC64
60#define do_signal do_signal32
61#define sys_sigsuspend compat_sys_sigsuspend
62#define sys_rt_sigsuspend compat_sys_rt_sigsuspend
63#define sys_rt_sigreturn compat_sys_rt_sigreturn
64#define sys_sigaction compat_sys_sigaction
65#define sys_swapcontext compat_sys_swapcontext
66#define sys_sigreturn compat_sys_sigreturn
67
68#define old_sigaction old_sigaction32
69#define sigcontext sigcontext32
70#define mcontext mcontext32
71#define ucontext ucontext32
72
73/*
74 * Returning 0 means we return to userspace via
75 * ret_from_except and thus restore all user
76 * registers from *regs. This is what we need
77 * to do when a signal has been delivered.
78 */
79#define sigreturn_exit(regs) return 0
80
81#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
82#undef __SIGNAL_FRAMESIZE
83#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
84#undef ELF_NVRREG
85#define ELF_NVRREG ELF_NVRREG32
86
87/*
88 * Functions for flipping sigsets (thanks to brain dead generic
89 * implementation that makes things simple for little endian only)
90 */
91static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
92{
93 compat_sigset_t cset;
94
95 switch (_NSIG_WORDS) {
96 case 4: cset.sig[5] = set->sig[3] & 0xffffffffull;
97 cset.sig[7] = set->sig[3] >> 32;
98 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
99 cset.sig[5] = set->sig[2] >> 32;
100 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
101 cset.sig[3] = set->sig[1] >> 32;
102 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
103 cset.sig[1] = set->sig[0] >> 32;
104 }
105 return copy_to_user(uset, &cset, sizeof(*uset));
106}
107
108static inline int get_sigset_t(sigset_t *set,
109 const compat_sigset_t __user *uset)
110{
111 compat_sigset_t s32;
112
113 if (copy_from_user(&s32, uset, sizeof(*uset)))
114 return -EFAULT;
115
116 /*
117 * Swap the 2 words of the 64-bit sigset_t (they are stored
118 * in the "wrong" endian in 32-bit user storage).
119 */
120 switch (_NSIG_WORDS) {
121 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
122 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
123 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
124 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
125 }
126 return 0;
127}
128
129static inline int get_old_sigaction(struct k_sigaction *new_ka,
130 struct old_sigaction __user *act)
131{
132 compat_old_sigset_t mask;
133 compat_uptr_t handler, restorer;
134
135 if (get_user(handler, &act->sa_handler) ||
136 __get_user(restorer, &act->sa_restorer) ||
137 __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
138 __get_user(mask, &act->sa_mask))
139 return -EFAULT;
140 new_ka->sa.sa_handler = compat_ptr(handler);
141 new_ka->sa.sa_restorer = compat_ptr(restorer);
142 siginitset(&new_ka->sa.sa_mask, mask);
143 return 0;
144}
145
146static inline compat_uptr_t to_user_ptr(void *kp)
147{
148 return (compat_uptr_t)(u64)kp;
149}
150
151#define from_user_ptr(p) compat_ptr(p)
152
153static inline int save_general_regs(struct pt_regs *regs,
154 struct mcontext __user *frame)
155{
156 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
157 int i;
158
159 for (i = 0; i <= PT_RESULT; i ++)
160 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
161 return -EFAULT;
162 return 0;
163}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
169 int i;
170
171 for (i = 0; i <= PT_RESULT; i++) {
172 if ((i == PT_MSR) || (i == PT_SOFTE))
173 continue;
174 if (__get_user(gregs[i], &sr->mc_gregs[i]))
175 return -EFAULT;
176 }
177 return 0;
178}
179
180#else /* CONFIG_PPC64 */
181
182extern void sigreturn_exit(struct pt_regs *);
183
184#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
185
186static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
187{
188 return copy_to_user(uset, set, sizeof(*uset));
189}
190
191static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
192{
193 return copy_from_user(set, uset, sizeof(*uset));
194}
195
196static inline int get_old_sigaction(struct k_sigaction *new_ka,
197 struct old_sigaction __user *act)
198{
199 old_sigset_t mask;
200
201 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
202 __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
203 __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
204 return -EFAULT;
205 __get_user(new_ka->sa.sa_flags, &act->sa_flags);
206 __get_user(mask, &act->sa_mask);
207 siginitset(&new_ka->sa.sa_mask, mask);
208 return 0;
209}
210
211#define to_user_ptr(p) (p)
212#define from_user_ptr(p) (p)
213
214static inline int save_general_regs(struct pt_regs *regs,
215 struct mcontext __user *frame)
216{
217 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
218}
219
220static inline int restore_general_regs(struct pt_regs *regs,
221 struct mcontext __user *sr)
222{
223 /* copy up to but not including MSR */
224 if (__copy_from_user(regs, &sr->mc_gregs,
225 PT_MSR * sizeof(elf_greg_t)))
226 return -EFAULT;
227 /* copy from orig_r3 (the word after the MSR) up to the end */
228 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
229 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
230 return -EFAULT;
231 return 0;
232}
233
234#endif /* CONFIG_PPC64 */
235
236int do_signal(sigset_t *oldset, struct pt_regs *regs);
237
238/*
239 * Atomically swap in the new signal mask, and wait for a signal.
240 */
241long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
242 struct pt_regs *regs)
243{
244 sigset_t saveset;
245
246 mask &= _BLOCKABLE;
247 spin_lock_irq(&current->sighand->siglock);
248 saveset = current->blocked;
249 siginitset(&current->blocked, mask);
250 recalc_sigpending();
251 spin_unlock_irq(&current->sighand->siglock);
252
253 regs->result = -EINTR;
254 regs->gpr[3] = EINTR;
255 regs->ccr |= 0x10000000;
256 while (1) {
257 current->state = TASK_INTERRUPTIBLE;
258 schedule();
259 if (do_signal(&saveset, regs))
260 sigreturn_exit(regs);
261 }
262}
263
264long sys_rt_sigsuspend(
265#ifdef CONFIG_PPC64
266 compat_sigset_t __user *unewset,
267#else
268 sigset_t __user *unewset,
269#endif
270 size_t sigsetsize, int p3, int p4,
271 int p6, int p7, struct pt_regs *regs)
272{
273 sigset_t saveset, newset;
274
275 /* XXX: Don't preclude handling different sized sigset_t's. */
276 if (sigsetsize != sizeof(sigset_t))
277 return -EINVAL;
278
279 if (get_sigset_t(&newset, unewset))
280 return -EFAULT;
281 sigdelsetmask(&newset, ~_BLOCKABLE);
282
283 spin_lock_irq(&current->sighand->siglock);
284 saveset = current->blocked;
285 current->blocked = newset;
286 recalc_sigpending();
287 spin_unlock_irq(&current->sighand->siglock);
288
289 regs->result = -EINTR;
290 regs->gpr[3] = EINTR;
291 regs->ccr |= 0x10000000;
292 while (1) {
293 current->state = TASK_INTERRUPTIBLE;
294 schedule();
295 if (do_signal(&saveset, regs))
296 sigreturn_exit(regs);
297 }
298}
299
300#ifdef CONFIG_PPC32
301long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
302 int r6, int r7, int r8, struct pt_regs *regs)
303{
304 return do_sigaltstack(uss, uoss, regs->gpr[1]);
305}
306#endif
307
308long sys_sigaction(int sig, struct old_sigaction __user *act,
309 struct old_sigaction __user *oact)
310{
311 struct k_sigaction new_ka, old_ka;
312 int ret;
313
314#ifdef CONFIG_PPC64
315 if (sig < 0)
316 sig = -sig;
317#endif
318
319 if (act) {
320 if (get_old_sigaction(&new_ka, act))
321 return -EFAULT;
322 }
323
324 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
325 if (!ret && oact) {
326 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
327 __put_user(to_user_ptr(old_ka.sa.sa_handler),
328 &oact->sa_handler) ||
329 __put_user(to_user_ptr(old_ka.sa.sa_restorer),
330 &oact->sa_restorer) ||
331 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
332 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
333 return -EFAULT;
334 }
335
336 return ret;
337}
41 338
42/* 339/*
43 * When we have signals to deliver, we set up on the 340 * When we have signals to deliver, we set up on the
44 * user stack, going down from the original stack pointer: 341 * user stack, going down from the original stack pointer:
45 * a sigregs32 struct 342 * a sigregs struct
46 * a sigcontext32 struct 343 * a sigcontext struct
47 * a gap of __SIGNAL_FRAMESIZE32 bytes 344 * a gap of __SIGNAL_FRAMESIZE bytes
48 * 345 *
49 * Each of these things must be a multiple of 16 bytes in size. 346 * Each of these things must be a multiple of 16 bytes in size.
50 * 347 *
51 */ 348 */
52struct sigregs32 { 349struct sigregs {
53 struct mcontext32 mctx; /* all the register values */ 350 struct mcontext mctx; /* all the register values */
54 /* 351 /*
55 * Programs using the rs6000/xcoff abi can save up to 19 gp 352 * Programs using the rs6000/xcoff abi can save up to 19 gp
56 * regs and 18 fp regs below sp before decrementing it. 353 * regs and 18 fp regs below sp before decrementing it.
@@ -64,17 +361,21 @@ struct sigregs32 {
64/* 361/*
65 * When we have rt signals to deliver, we set up on the 362 * When we have rt signals to deliver, we set up on the
66 * user stack, going down from the original stack pointer: 363 * user stack, going down from the original stack pointer:
67 * one rt_sigframe32 struct (siginfo + ucontext + ABI gap) 364 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
68 * a gap of __SIGNAL_FRAMESIZE32+16 bytes 365 * a gap of __SIGNAL_FRAMESIZE+16 bytes
69 * (the +16 is to get the siginfo and ucontext32 in the same 366 * (the +16 is to get the siginfo and ucontext in the same
70 * positions as in older kernels). 367 * positions as in older kernels).
71 * 368 *
72 * Each of these things must be a multiple of 16 bytes in size. 369 * Each of these things must be a multiple of 16 bytes in size.
73 * 370 *
74 */ 371 */
75struct rt_sigframe32 { 372struct rt_sigframe {
76 compat_siginfo_t info; 373#ifdef CONFIG_PPC64
77 struct ucontext32 uc; 374 compat_siginfo_t info;
375#else
376 struct siginfo info;
377#endif
378 struct ucontext uc;
78 /* 379 /*
79 * Programs using the rs6000/xcoff abi can save up to 19 gp 380 * Programs using the rs6000/xcoff abi can save up to 19 gp
80 * regs and 18 fp regs below sp before decrementing it. 381 * regs and 18 fp regs below sp before decrementing it.
@@ -82,76 +383,34 @@ struct rt_sigframe32 {
82 int abigap[56]; 383 int abigap[56];
83}; 384};
84 385
85
86/*
87 * Common utility functions used by signal and context support
88 *
89 */
90
91/*
92 * Restore the user process's signal mask
93 * (implemented in signal.c)
94 */
95extern void restore_sigmask(sigset_t *set);
96
97/*
98 * Functions for flipping sigsets (thanks to brain dead generic
99 * implementation that makes things simple for little endian only
100 */
101static inline void compat_from_sigset(compat_sigset_t *compat, sigset_t *set)
102{
103 switch (_NSIG_WORDS) {
104 case 4: compat->sig[5] = set->sig[3] & 0xffffffffull ;
105 compat->sig[7] = set->sig[3] >> 32;
106 case 3: compat->sig[4] = set->sig[2] & 0xffffffffull ;
107 compat->sig[5] = set->sig[2] >> 32;
108 case 2: compat->sig[2] = set->sig[1] & 0xffffffffull ;
109 compat->sig[3] = set->sig[1] >> 32;
110 case 1: compat->sig[0] = set->sig[0] & 0xffffffffull ;
111 compat->sig[1] = set->sig[0] >> 32;
112 }
113}
114
115static inline void sigset_from_compat(sigset_t *set, compat_sigset_t *compat)
116{
117 switch (_NSIG_WORDS) {
118 case 4: set->sig[3] = compat->sig[6] | (((long)compat->sig[7]) << 32);
119 case 3: set->sig[2] = compat->sig[4] | (((long)compat->sig[5]) << 32);
120 case 2: set->sig[1] = compat->sig[2] | (((long)compat->sig[3]) << 32);
121 case 1: set->sig[0] = compat->sig[0] | (((long)compat->sig[1]) << 32);
122 }
123}
124
125
126/* 386/*
127 * Save the current user registers on the user stack. 387 * Save the current user registers on the user stack.
128 * We only save the altivec registers if the process has used 388 * We only save the altivec/spe registers if the process has used
129 * altivec instructions at some point. 389 * altivec/spe instructions at some point.
130 */ 390 */
131static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame, int sigret) 391static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
392 int sigret)
132{ 393{
133 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 394#ifdef CONFIG_PPC32
134 int i, err = 0; 395 CHECK_FULL_REGS(regs);
135 396#endif
136 /* Make sure floating point registers are stored in regs */ 397 /* Make sure floating point registers are stored in regs */
137 flush_fp_to_thread(current); 398 flush_fp_to_thread(current);
138 399
139 /* save general and floating-point registers */ 400 /* save general and floating-point registers */
140 for (i = 0; i <= PT_RESULT; i ++) 401 if (save_general_regs(regs, frame) ||
141 err |= __put_user((unsigned int)gregs[i], &frame->mc_gregs[i]); 402 __copy_to_user(&frame->mc_fregs, current->thread.fpr,
142 err |= __copy_to_user(&frame->mc_fregs, current->thread.fpr, 403 ELF_NFPREG * sizeof(double)))
143 ELF_NFPREG * sizeof(double));
144 if (err)
145 return 1; 404 return 1;
146 405
147 current->thread.fpscr = 0; /* turn off all fp exceptions */ 406 current->thread.fpscr.val = 0; /* turn off all fp exceptions */
148 407
149#ifdef CONFIG_ALTIVEC 408#ifdef CONFIG_ALTIVEC
150 /* save altivec registers */ 409 /* save altivec registers */
151 if (current->thread.used_vr) { 410 if (current->thread.used_vr) {
152 flush_altivec_to_thread(current); 411 flush_altivec_to_thread(current);
153 if (__copy_to_user(&frame->mc_vregs, current->thread.vr, 412 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
154 ELF_NVRREG32 * sizeof(vector128))) 413 ELF_NVRREG * sizeof(vector128)))
155 return 1; 414 return 1;
156 /* set MSR_VEC in the saved MSR value to indicate that 415 /* set MSR_VEC in the saved MSR value to indicate that
157 frame->mc_vregs contains valid data */ 416 frame->mc_vregs contains valid data */
@@ -169,6 +428,25 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
169 return 1; 428 return 1;
170#endif /* CONFIG_ALTIVEC */ 429#endif /* CONFIG_ALTIVEC */
171 430
431#ifdef CONFIG_SPE
432 /* save spe registers */
433 if (current->thread.used_spe) {
434 flush_spe_to_thread(current);
435 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
436 ELF_NEVRREG * sizeof(u32)))
437 return 1;
438 /* set MSR_SPE in the saved MSR value to indicate that
439 frame->mc_vregs contains valid data */
440 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
441 return 1;
442 }
443 /* else assert((regs->msr & MSR_SPE) == 0) */
444
445 /* We always copy to/from spefscr */
446 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
447 return 1;
448#endif /* CONFIG_SPE */
449
172 if (sigret) { 450 if (sigret) {
173 /* Set up the sigreturn trampoline: li r0,sigret; sc */ 451 /* Set up the sigreturn trampoline: li r0,sigret; sc */
174 if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) 452 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
@@ -186,13 +464,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
186 * (except for MSR). 464 * (except for MSR).
187 */ 465 */
188static long restore_user_regs(struct pt_regs *regs, 466static long restore_user_regs(struct pt_regs *regs,
189 struct mcontext32 __user *sr, int sig) 467 struct mcontext __user *sr, int sig)
190{ 468{
191 elf_greg_t64 *gregs = (elf_greg_t64 *)regs; 469 long err;
192 int i;
193 long err = 0;
194 unsigned int save_r2 = 0; 470 unsigned int save_r2 = 0;
195#ifdef CONFIG_ALTIVEC 471#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
196 unsigned long msr; 472 unsigned long msr;
197#endif 473#endif
198 474
@@ -202,11 +478,7 @@ static long restore_user_regs(struct pt_regs *regs,
202 */ 478 */
203 if (!sig) 479 if (!sig)
204 save_r2 = (unsigned int)regs->gpr[2]; 480 save_r2 = (unsigned int)regs->gpr[2];
205 for (i = 0; i <= PT_RESULT; i++) { 481 err = restore_general_regs(regs, sr);
206 if ((i == PT_MSR) || (i == PT_SOFTE))
207 continue;
208 err |= __get_user(gregs[i], &sr->mc_gregs[i]);
209 }
210 if (!sig) 482 if (!sig)
211 regs->gpr[2] = (unsigned long) save_r2; 483 regs->gpr[2] = (unsigned long) save_r2;
212 if (err) 484 if (err)
@@ -229,135 +501,51 @@ static long restore_user_regs(struct pt_regs *regs,
229 sizeof(sr->mc_vregs))) 501 sizeof(sr->mc_vregs)))
230 return 1; 502 return 1;
231 } else if (current->thread.used_vr) 503 } else if (current->thread.used_vr)
232 memset(current->thread.vr, 0, ELF_NVRREG32 * sizeof(vector128)); 504 memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
233 505
234 /* Always get VRSAVE back */ 506 /* Always get VRSAVE back */
235 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) 507 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
236 return 1; 508 return 1;
237#endif /* CONFIG_ALTIVEC */ 509#endif /* CONFIG_ALTIVEC */
238 510
511#ifdef CONFIG_SPE
512 /* force the process to reload the spe registers from
513 current->thread when it next does spe instructions */
514 regs->msr &= ~MSR_SPE;
515 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
516 /* restore spe registers from the stack */
517 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
518 ELF_NEVRREG * sizeof(u32)))
519 return 1;
520 } else if (current->thread.used_spe)
521 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
522
523 /* Always get SPEFSCR back */
524 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
525 return 1;
526#endif /* CONFIG_SPE */
527
239#ifndef CONFIG_SMP 528#ifndef CONFIG_SMP
240 preempt_disable(); 529 preempt_disable();
241 if (last_task_used_math == current) 530 if (last_task_used_math == current)
242 last_task_used_math = NULL; 531 last_task_used_math = NULL;
243 if (last_task_used_altivec == current) 532 if (last_task_used_altivec == current)
244 last_task_used_altivec = NULL; 533 last_task_used_altivec = NULL;
534#ifdef CONFIG_SPE
535 if (last_task_used_spe == current)
536 last_task_used_spe = NULL;
537#endif
245 preempt_enable(); 538 preempt_enable();
246#endif 539#endif
247 return 0; 540 return 0;
248} 541}
249 542
250 543#ifdef CONFIG_PPC64
251/* 544long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
252 * Start of nonRT signal support
253 *
254 * sigset_t is 32 bits for non-rt signals
255 *
256 * System Calls
257 * sigaction sys32_sigaction
258 * sigreturn sys32_sigreturn
259 *
260 * Note sigsuspend has no special 32 bit routine - uses the 64 bit routine
261 *
262 * Other routines
263 * setup_frame32
264 */
265
266/*
267 * Atomically swap in the new signal mask, and wait for a signal.
268 */
269long sys32_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
270 struct pt_regs *regs)
271{
272 sigset_t saveset;
273
274 mask &= _BLOCKABLE;
275 spin_lock_irq(&current->sighand->siglock);
276 saveset = current->blocked;
277 siginitset(&current->blocked, mask);
278 recalc_sigpending();
279 spin_unlock_irq(&current->sighand->siglock);
280
281 regs->result = -EINTR;
282 regs->gpr[3] = EINTR;
283 regs->ccr |= 0x10000000;
284 while (1) {
285 current->state = TASK_INTERRUPTIBLE;
286 schedule();
287 if (do_signal32(&saveset, regs))
288 /*
289 * Returning 0 means we return to userspace via
290 * ret_from_except and thus restore all user
291 * registers from *regs. This is what we need
292 * to do when a signal has been delivered.
293 */
294 return 0;
295 }
296}
297
298long sys32_sigaction(int sig, struct old_sigaction32 __user *act,
299 struct old_sigaction32 __user *oact)
300{
301 struct k_sigaction new_ka, old_ka;
302 int ret;
303
304 if (sig < 0)
305 sig = -sig;
306
307 if (act) {
308 compat_old_sigset_t mask;
309 compat_uptr_t handler, restorer;
310
311 if (get_user(handler, &act->sa_handler) ||
312 __get_user(restorer, &act->sa_restorer) ||
313 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
314 __get_user(mask, &act->sa_mask))
315 return -EFAULT;
316 new_ka.sa.sa_handler = compat_ptr(handler);
317 new_ka.sa.sa_restorer = compat_ptr(restorer);
318 siginitset(&new_ka.sa.sa_mask, mask);
319 }
320
321 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
322 if (!ret && oact) {
323 if (put_user((long)old_ka.sa.sa_handler, &oact->sa_handler) ||
324 __put_user((long)old_ka.sa.sa_restorer, &oact->sa_restorer) ||
325 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
326 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
327 return -EFAULT;
328 }
329
330 return ret;
331}
332
333
334
335/*
336 * Start of RT signal support
337 *
338 * sigset_t is 64 bits for rt signals
339 *
340 * System Calls
341 * sigaction sys32_rt_sigaction
342 * sigpending sys32_rt_sigpending
343 * sigprocmask sys32_rt_sigprocmask
344 * sigreturn sys32_rt_sigreturn
345 * sigqueueinfo sys32_rt_sigqueueinfo
346 * sigsuspend sys32_rt_sigsuspend
347 *
348 * Other routines
349 * setup_rt_frame32
350 * copy_siginfo_to_user32
351 * siginfo32to64
352 */
353
354
355long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
356 struct sigaction32 __user *oact, size_t sigsetsize) 545 struct sigaction32 __user *oact, size_t sigsetsize)
357{ 546{
358 struct k_sigaction new_ka, old_ka; 547 struct k_sigaction new_ka, old_ka;
359 int ret; 548 int ret;
360 compat_sigset_t set32;
361 549
362 /* XXX: Don't preclude handling different sized sigset_t's. */ 550 /* XXX: Don't preclude handling different sized sigset_t's. */
363 if (sigsetsize != sizeof(compat_sigset_t)) 551 if (sigsetsize != sizeof(compat_sigset_t))
@@ -368,9 +556,7 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
368 556
369 ret = get_user(handler, &act->sa_handler); 557 ret = get_user(handler, &act->sa_handler);
370 new_ka.sa.sa_handler = compat_ptr(handler); 558 new_ka.sa.sa_handler = compat_ptr(handler);
371 ret |= __copy_from_user(&set32, &act->sa_mask, 559 ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
372 sizeof(compat_sigset_t));
373 sigset_from_compat(&new_ka.sa.sa_mask, &set32);
374 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); 560 ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
375 if (ret) 561 if (ret)
376 return -EFAULT; 562 return -EFAULT;
@@ -378,10 +564,8 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
378 564
379 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); 565 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
380 if (!ret && oact) { 566 if (!ret && oact) {
381 compat_from_sigset(&set32, &old_ka.sa.sa_mask);
382 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler); 567 ret = put_user((long)old_ka.sa.sa_handler, &oact->sa_handler);
383 ret |= __copy_to_user(&oact->sa_mask, &set32, 568 ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
384 sizeof(compat_sigset_t));
385 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); 569 ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
386 } 570 }
387 return ret; 571 return ret;
@@ -394,41 +578,37 @@ long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
394 * of a signed int (msr in 32-bit mode) and the register representation 578 * of a signed int (msr in 32-bit mode) and the register representation
395 * of a signed int (msr in 64-bit mode) is performed. 579 * of a signed int (msr in 64-bit mode) is performed.
396 */ 580 */
397long sys32_rt_sigprocmask(u32 how, compat_sigset_t __user *set, 581long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
398 compat_sigset_t __user *oset, size_t sigsetsize) 582 compat_sigset_t __user *oset, size_t sigsetsize)
399{ 583{
400 sigset_t s; 584 sigset_t s;
401 sigset_t __user *up; 585 sigset_t __user *up;
402 compat_sigset_t s32;
403 int ret; 586 int ret;
404 mm_segment_t old_fs = get_fs(); 587 mm_segment_t old_fs = get_fs();
405 588
406 if (set) { 589 if (set) {
407 if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) 590 if (get_sigset_t(&s, set))
408 return -EFAULT; 591 return -EFAULT;
409 sigset_from_compat(&s, &s32);
410 } 592 }
411 593
412 set_fs(KERNEL_DS); 594 set_fs(KERNEL_DS);
413 /* This is valid because of the set_fs() */ 595 /* This is valid because of the set_fs() */
414 up = (sigset_t __user *) &s; 596 up = (sigset_t __user *) &s;
415 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL, 597 ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
416 sigsetsize); 598 sigsetsize);
417 set_fs(old_fs); 599 set_fs(old_fs);
418 if (ret) 600 if (ret)
419 return ret; 601 return ret;
420 if (oset) { 602 if (oset) {
421 compat_from_sigset(&s32, &s); 603 if (put_sigset_t(oset, &s))
422 if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
423 return -EFAULT; 604 return -EFAULT;
424 } 605 }
425 return 0; 606 return 0;
426} 607}
427 608
428long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize) 609long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
429{ 610{
430 sigset_t s; 611 sigset_t s;
431 compat_sigset_t s32;
432 int ret; 612 int ret;
433 mm_segment_t old_fs = get_fs(); 613 mm_segment_t old_fs = get_fs();
434 614
@@ -437,8 +617,7 @@ long sys32_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
437 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); 617 ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
438 set_fs(old_fs); 618 set_fs(old_fs);
439 if (!ret) { 619 if (!ret) {
440 compat_from_sigset(&s32, &s); 620 if (put_sigset_t(set, &s))
441 if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
442 return -EFAULT; 621 return -EFAULT;
443 } 622 }
444 return ret; 623 return ret;
@@ -500,6 +679,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
500 return err; 679 return err;
501} 680}
502 681
682#define copy_siginfo_to_user copy_siginfo_to_user32
683
503/* 684/*
504 * Note: it is necessary to treat pid and sig as unsigned ints, with the 685 * Note: it is necessary to treat pid and sig as unsigned ints, with the
505 * corresponding cast to a signed int to insure that the proper conversion 686 * corresponding cast to a signed int to insure that the proper conversion
@@ -507,12 +688,12 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
507 * (msr in 32-bit mode) and the register representation of a signed int 688 * (msr in 32-bit mode) and the register representation of a signed int
508 * (msr in 64-bit mode) is performed. 689 * (msr in 64-bit mode) is performed.
509 */ 690 */
510long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo) 691long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
511{ 692{
512 siginfo_t info; 693 siginfo_t info;
513 int ret; 694 int ret;
514 mm_segment_t old_fs = get_fs(); 695 mm_segment_t old_fs = get_fs();
515 696
516 if (copy_from_user (&info, uinfo, 3*sizeof(int)) || 697 if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
517 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32)) 698 copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE32))
518 return -EFAULT; 699 return -EFAULT;
@@ -522,58 +703,14 @@ long sys32_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
522 set_fs (old_fs); 703 set_fs (old_fs);
523 return ret; 704 return ret;
524} 705}
525
526int sys32_rt_sigsuspend(compat_sigset_t __user * unewset, size_t sigsetsize, int p3,
527 int p4, int p6, int p7, struct pt_regs *regs)
528{
529 sigset_t saveset, newset;
530 compat_sigset_t s32;
531
532 /* XXX: Don't preclude handling different sized sigset_t's. */
533 if (sigsetsize != sizeof(sigset_t))
534 return -EINVAL;
535
536 if (copy_from_user(&s32, unewset, sizeof(s32)))
537 return -EFAULT;
538
539 /*
540 * Swap the 2 words of the 64-bit sigset_t (they are stored
541 * in the "wrong" endian in 32-bit user storage).
542 */
543 sigset_from_compat(&newset, &s32);
544
545 sigdelsetmask(&newset, ~_BLOCKABLE);
546 spin_lock_irq(&current->sighand->siglock);
547 saveset = current->blocked;
548 current->blocked = newset;
549 recalc_sigpending();
550 spin_unlock_irq(&current->sighand->siglock);
551
552 regs->result = -EINTR;
553 regs->gpr[3] = EINTR;
554 regs->ccr |= 0x10000000;
555 while (1) {
556 current->state = TASK_INTERRUPTIBLE;
557 schedule();
558 if (do_signal32(&saveset, regs))
559 /*
560 * Returning 0 means we return to userspace via
561 * ret_from_except and thus restore all user
562 * registers from *regs. This is what we need
563 * to do when a signal has been delivered.
564 */
565 return 0;
566 }
567}
568
569/* 706/*
570 * Start Alternate signal stack support 707 * Start Alternate signal stack support
571 * 708 *
572 * System Calls 709 * System Calls
573 * sigaltatck sys32_sigaltstack 710 * sigaltatck compat_sys_sigaltstack
574 */ 711 */
575 712
576int sys32_sigaltstack(u32 __new, u32 __old, int r5, 713int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
577 int r6, int r7, int r8, struct pt_regs *regs) 714 int r6, int r7, int r8, struct pt_regs *regs)
578{ 715{
579 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new; 716 stack_32_t __user * newstack = (stack_32_t __user *)(long) __new;
@@ -615,76 +752,94 @@ int sys32_sigaltstack(u32 __new, u32 __old, int r5,
615 return -EFAULT; 752 return -EFAULT;
616 return ret; 753 return ret;
617} 754}
755#endif /* CONFIG_PPC64 */
756
618 757
758/*
759 * Restore the user process's signal mask
760 */
761#ifdef CONFIG_PPC64
762extern void restore_sigmask(sigset_t *set);
763#else /* CONFIG_PPC64 */
764static void restore_sigmask(sigset_t *set)
765{
766 sigdelsetmask(set, ~_BLOCKABLE);
767 spin_lock_irq(&current->sighand->siglock);
768 current->blocked = *set;
769 recalc_sigpending();
770 spin_unlock_irq(&current->sighand->siglock);
771}
772#endif
619 773
620/* 774/*
621 * Set up a signal frame for a "real-time" signal handler 775 * Set up a signal frame for a "real-time" signal handler
622 * (one which gets siginfo). 776 * (one which gets siginfo).
623 */ 777 */
624static int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, 778static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
625 siginfo_t *info, sigset_t *oldset, 779 siginfo_t *info, sigset_t *oldset,
626 struct pt_regs * regs, unsigned long newsp) 780 struct pt_regs *regs, unsigned long newsp)
627{ 781{
628 struct rt_sigframe32 __user *rt_sf; 782 struct rt_sigframe __user *rt_sf;
629 struct mcontext32 __user *frame; 783 struct mcontext __user *frame;
630 unsigned long origsp = newsp; 784 unsigned long origsp = newsp;
631 compat_sigset_t c_oldset;
632 785
633 /* Set up Signal Frame */ 786 /* Set up Signal Frame */
634 /* Put a Real Time Context onto stack */ 787 /* Put a Real Time Context onto stack */
635 newsp -= sizeof(*rt_sf); 788 newsp -= sizeof(*rt_sf);
636 rt_sf = (struct rt_sigframe32 __user *)newsp; 789 rt_sf = (struct rt_sigframe __user *)newsp;
637 790
638 /* create a stack frame for the caller of the handler */ 791 /* create a stack frame for the caller of the handler */
639 newsp -= __SIGNAL_FRAMESIZE32 + 16; 792 newsp -= __SIGNAL_FRAMESIZE + 16;
640 793
641 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp)) 794 if (!access_ok(VERIFY_WRITE, (void __user *)newsp, origsp - newsp))
642 goto badframe; 795 goto badframe;
643 796
644 compat_from_sigset(&c_oldset, oldset);
645
646 /* Put the siginfo & fill in most of the ucontext */ 797 /* Put the siginfo & fill in most of the ucontext */
647 if (copy_siginfo_to_user32(&rt_sf->info, info) 798 if (copy_siginfo_to_user(&rt_sf->info, info)
648 || __put_user(0, &rt_sf->uc.uc_flags) 799 || __put_user(0, &rt_sf->uc.uc_flags)
649 || __put_user(0, &rt_sf->uc.uc_link) 800 || __put_user(0, &rt_sf->uc.uc_link)
650 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp) 801 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
651 || __put_user(sas_ss_flags(regs->gpr[1]), 802 || __put_user(sas_ss_flags(regs->gpr[1]),
652 &rt_sf->uc.uc_stack.ss_flags) 803 &rt_sf->uc.uc_stack.ss_flags)
653 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size) 804 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
654 || __put_user((u32)(u64)&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs) 805 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
655 || __copy_to_user(&rt_sf->uc.uc_sigmask, &c_oldset, sizeof(c_oldset))) 806 &rt_sf->uc.uc_regs)
807 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
656 goto badframe; 808 goto badframe;
657 809
658 /* Save user registers on the stack */ 810 /* Save user registers on the stack */
659 frame = &rt_sf->uc.uc_mcontext; 811 frame = &rt_sf->uc.uc_mcontext;
660 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 812#ifdef CONFIG_PPC64
661 goto badframe;
662
663 if (vdso32_rt_sigtramp && current->thread.vdso_base) { 813 if (vdso32_rt_sigtramp && current->thread.vdso_base) {
664 if (save_user_regs(regs, frame, 0)) 814 if (save_user_regs(regs, frame, 0))
665 goto badframe; 815 goto badframe;
666 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp; 816 regs->link = current->thread.vdso_base + vdso32_rt_sigtramp;
667 } else { 817 } else
818#endif
819 {
668 if (save_user_regs(regs, frame, __NR_rt_sigreturn)) 820 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
669 goto badframe; 821 goto badframe;
670 regs->link = (unsigned long) frame->tramp; 822 regs->link = (unsigned long) frame->tramp;
671 } 823 }
672 regs->gpr[1] = (unsigned long) newsp; 824 if (put_user(regs->gpr[1], (u32 __user *)newsp))
825 goto badframe;
826 regs->gpr[1] = newsp;
673 regs->gpr[3] = sig; 827 regs->gpr[3] = sig;
674 regs->gpr[4] = (unsigned long) &rt_sf->info; 828 regs->gpr[4] = (unsigned long) &rt_sf->info;
675 regs->gpr[5] = (unsigned long) &rt_sf->uc; 829 regs->gpr[5] = (unsigned long) &rt_sf->uc;
676 regs->gpr[6] = (unsigned long) rt_sf; 830 regs->gpr[6] = (unsigned long) rt_sf;
677 regs->nip = (unsigned long) ka->sa.sa_handler; 831 regs->nip = (unsigned long) ka->sa.sa_handler;
678 regs->trap = 0; 832 regs->trap = 0;
833#ifdef CONFIG_PPC64
679 regs->result = 0; 834 regs->result = 0;
680 835
681 if (test_thread_flag(TIF_SINGLESTEP)) 836 if (test_thread_flag(TIF_SINGLESTEP))
682 ptrace_notify(SIGTRAP); 837 ptrace_notify(SIGTRAP);
683 838#endif
684 return 1; 839 return 1;
685 840
686badframe: 841badframe:
687#if DEBUG_SIG 842#ifdef DEBUG_SIG
688 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", 843 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
689 regs, frame, newsp); 844 regs, frame, newsp);
690#endif 845#endif
@@ -692,46 +847,50 @@ badframe:
692 return 0; 847 return 0;
693} 848}
694 849
695static long do_setcontext32(struct ucontext32 __user *ucp, struct pt_regs *regs, int sig) 850static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
696{ 851{
697 compat_sigset_t c_set;
698 sigset_t set; 852 sigset_t set;
699 u32 mcp; 853 struct mcontext __user *mcp;
854
855 if (get_sigset_t(&set, &ucp->uc_sigmask))
856 return -EFAULT;
857#ifdef CONFIG_PPC64
858 {
859 u32 cmcp;
700 860
701 if (__copy_from_user(&c_set, &ucp->uc_sigmask, sizeof(c_set)) 861 if (__get_user(cmcp, &ucp->uc_regs))
702 || __get_user(mcp, &ucp->uc_regs)) 862 return -EFAULT;
863 mcp = (struct mcontext __user *)(u64)cmcp;
864 }
865#else
866 if (__get_user(mcp, &ucp->uc_regs))
703 return -EFAULT; 867 return -EFAULT;
704 sigset_from_compat(&set, &c_set); 868#endif
705 restore_sigmask(&set); 869 restore_sigmask(&set);
706 if (restore_user_regs(regs, (struct mcontext32 __user *)(u64)mcp, sig)) 870 if (restore_user_regs(regs, mcp, sig))
707 return -EFAULT; 871 return -EFAULT;
708 872
709 return 0; 873 return 0;
710} 874}
711 875
712/* 876long sys_swapcontext(struct ucontext __user *old_ctx,
713 * Handle {get,set,swap}_context operations for 32 bits processes 877 struct ucontext __user *new_ctx,
714 */
715
716long sys32_swapcontext(struct ucontext32 __user *old_ctx,
717 struct ucontext32 __user *new_ctx,
718 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) 878 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
719{ 879{
720 unsigned char tmp; 880 unsigned char tmp;
721 compat_sigset_t c_set;
722 881
723 /* Context size is for future use. Right now, we only make sure 882 /* Context size is for future use. Right now, we only make sure
724 * we are passed something we understand 883 * we are passed something we understand
725 */ 884 */
726 if (ctx_size < sizeof(struct ucontext32)) 885 if (ctx_size < sizeof(struct ucontext))
727 return -EINVAL; 886 return -EINVAL;
728 887
729 if (old_ctx != NULL) { 888 if (old_ctx != NULL) {
730 compat_from_sigset(&c_set, &current->blocked);
731 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx)) 889 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
732 || save_user_regs(regs, &old_ctx->uc_mcontext, 0) 890 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
733 || __copy_to_user(&old_ctx->uc_sigmask, &c_set, sizeof(c_set)) 891 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
734 || __put_user((u32)(u64)&old_ctx->uc_mcontext, &old_ctx->uc_regs)) 892 || __put_user(to_user_ptr(&old_ctx->uc_mcontext),
893 &old_ctx->uc_regs))
735 return -EFAULT; 894 return -EFAULT;
736 } 895 }
737 if (new_ctx == NULL) 896 if (new_ctx == NULL)
@@ -752,27 +911,26 @@ long sys32_swapcontext(struct ucontext32 __user *old_ctx,
752 * or if another thread unmaps the region containing the context. 911 * or if another thread unmaps the region containing the context.
753 * We kill the task with a SIGSEGV in this situation. 912 * We kill the task with a SIGSEGV in this situation.
754 */ 913 */
755 if (do_setcontext32(new_ctx, regs, 0)) 914 if (do_setcontext(new_ctx, regs, 0))
756 do_exit(SIGSEGV); 915 do_exit(SIGSEGV);
757 916 sigreturn_exit(regs);
917 /* doesn't actually return back to here */
758 return 0; 918 return 0;
759} 919}
760 920
761long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 921long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
762 struct pt_regs *regs) 922 struct pt_regs *regs)
763{ 923{
764 struct rt_sigframe32 __user *rt_sf; 924 struct rt_sigframe __user *rt_sf;
765 int ret;
766
767 925
768 /* Always make any pending restarted system calls return -EINTR */ 926 /* Always make any pending restarted system calls return -EINTR */
769 current_thread_info()->restart_block.fn = do_no_restart_syscall; 927 current_thread_info()->restart_block.fn = do_no_restart_syscall;
770 928
771 rt_sf = (struct rt_sigframe32 __user *) 929 rt_sf = (struct rt_sigframe __user *)
772 (regs->gpr[1] + __SIGNAL_FRAMESIZE32 + 16); 930 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
773 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) 931 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
774 goto bad; 932 goto bad;
775 if (do_setcontext32(&rt_sf->uc, regs, 1)) 933 if (do_setcontext(&rt_sf->uc, regs, 1))
776 goto bad; 934 goto bad;
777 935
778 /* 936 /*
@@ -781,62 +939,165 @@ long sys32_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
781 * signal return. But other architectures do this and we have 939 * signal return. But other architectures do this and we have
782 * always done it up until now so it is probably better not to 940 * always done it up until now so it is probably better not to
783 * change it. -- paulus 941 * change it. -- paulus
784 * We use the sys32_ version that does the 32/64 bits conversion 942 */
943#ifdef CONFIG_PPC64
944 /*
945 * We use the compat_sys_ version that does the 32/64 bits conversion
785 * and takes userland pointer directly. What about error checking ? 946 * and takes userland pointer directly. What about error checking ?
786 * nobody does any... 947 * nobody does any...
787 */ 948 */
788 sys32_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); 949 compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
789 950 return (int)regs->result;
790 ret = regs->result; 951#else
791 952 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
792 return ret; 953 sigreturn_exit(regs); /* doesn't return here */
954 return 0;
955#endif
793 956
794 bad: 957 bad:
795 force_sig(SIGSEGV, current); 958 force_sig(SIGSEGV, current);
796 return 0; 959 return 0;
797} 960}
798 961
962#ifdef CONFIG_PPC32
963int sys_debug_setcontext(struct ucontext __user *ctx,
964 int ndbg, struct sig_dbg_op __user *dbg,
965 int r6, int r7, int r8,
966 struct pt_regs *regs)
967{
968 struct sig_dbg_op op;
969 int i;
970 unsigned long new_msr = regs->msr;
971#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
972 unsigned long new_dbcr0 = current->thread.dbcr0;
973#endif
974
975 for (i=0; i<ndbg; i++) {
976 if (__copy_from_user(&op, dbg, sizeof(op)))
977 return -EFAULT;
978 switch (op.dbg_type) {
979 case SIG_DBG_SINGLE_STEPPING:
980#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
981 if (op.dbg_value) {
982 new_msr |= MSR_DE;
983 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
984 } else {
985 new_msr &= ~MSR_DE;
986 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
987 }
988#else
989 if (op.dbg_value)
990 new_msr |= MSR_SE;
991 else
992 new_msr &= ~MSR_SE;
993#endif
994 break;
995 case SIG_DBG_BRANCH_TRACING:
996#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
997 return -EINVAL;
998#else
999 if (op.dbg_value)
1000 new_msr |= MSR_BE;
1001 else
1002 new_msr &= ~MSR_BE;
1003#endif
1004 break;
1005
1006 default:
1007 return -EINVAL;
1008 }
1009 }
1010
1011 /* We wait until here to actually install the values in the
1012 registers so if we fail in the above loop, it will not
1013 affect the contents of these registers. After this point,
1014 failure is a problem, anyway, and it's very unlikely unless
1015 the user is really doing something wrong. */
1016 regs->msr = new_msr;
1017#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1018 current->thread.dbcr0 = new_dbcr0;
1019#endif
1020
1021 /*
1022 * If we get a fault copying the context into the kernel's
1023 * image of the user's registers, we can't just return -EFAULT
1024 * because the user's registers will be corrupted. For instance
1025 * the NIP value may have been updated but not some of the
1026 * other registers. Given that we have done the access_ok
1027 * and successfully read the first and last bytes of the region
1028 * above, this should only happen in an out-of-memory situation
1029 * or if another thread unmaps the region containing the context.
1030 * We kill the task with a SIGSEGV in this situation.
1031 */
1032 if (do_setcontext(ctx, regs, 1)) {
1033 force_sig(SIGSEGV, current);
1034 goto out;
1035 }
1036
1037 /*
1038 * It's not clear whether or why it is desirable to save the
1039 * sigaltstack setting on signal delivery and restore it on
1040 * signal return. But other architectures do this and we have
1041 * always done it up until now so it is probably better not to
1042 * change it. -- paulus
1043 */
1044 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1045
1046 sigreturn_exit(regs);
1047 /* doesn't actually return back to here */
1048
1049 out:
1050 return 0;
1051}
1052#endif
799 1053
800/* 1054/*
801 * OK, we're invoking a handler 1055 * OK, we're invoking a handler
802 */ 1056 */
803static int handle_signal32(unsigned long sig, struct k_sigaction *ka, 1057static int handle_signal(unsigned long sig, struct k_sigaction *ka,
804 siginfo_t *info, sigset_t *oldset, 1058 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs,
805 struct pt_regs * regs, unsigned long newsp) 1059 unsigned long newsp)
806{ 1060{
807 struct sigcontext32 __user *sc; 1061 struct sigcontext __user *sc;
808 struct sigregs32 __user *frame; 1062 struct sigregs __user *frame;
809 unsigned long origsp = newsp; 1063 unsigned long origsp = newsp;
810 1064
811 /* Set up Signal Frame */ 1065 /* Set up Signal Frame */
812 newsp -= sizeof(struct sigregs32); 1066 newsp -= sizeof(struct sigregs);
813 frame = (struct sigregs32 __user *) newsp; 1067 frame = (struct sigregs __user *) newsp;
814 1068
815 /* Put a sigcontext on the stack */ 1069 /* Put a sigcontext on the stack */
816 newsp -= sizeof(*sc); 1070 newsp -= sizeof(*sc);
817 sc = (struct sigcontext32 __user *) newsp; 1071 sc = (struct sigcontext __user *) newsp;
818 1072
819 /* create a stack frame for the caller of the handler */ 1073 /* create a stack frame for the caller of the handler */
820 newsp -= __SIGNAL_FRAMESIZE32; 1074 newsp -= __SIGNAL_FRAMESIZE;
821 1075
822 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp)) 1076 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
823 goto badframe; 1077 goto badframe;
824 1078
825#if _NSIG != 64 1079#if _NSIG != 64
826#error "Please adjust handle_signal32()" 1080#error "Please adjust handle_signal()"
827#endif 1081#endif
828 if (__put_user((u32)(u64)ka->sa.sa_handler, &sc->handler) 1082 if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
829 || __put_user(oldset->sig[0], &sc->oldmask) 1083 || __put_user(oldset->sig[0], &sc->oldmask)
1084#ifdef CONFIG_PPC64
830 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) 1085 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
831 || __put_user((u32)(u64)frame, &sc->regs) 1086#else
1087 || __put_user(oldset->sig[1], &sc->_unused[3])
1088#endif
1089 || __put_user(to_user_ptr(frame), &sc->regs)
832 || __put_user(sig, &sc->signal)) 1090 || __put_user(sig, &sc->signal))
833 goto badframe; 1091 goto badframe;
834 1092
1093#ifdef CONFIG_PPC64
835 if (vdso32_sigtramp && current->thread.vdso_base) { 1094 if (vdso32_sigtramp && current->thread.vdso_base) {
836 if (save_user_regs(regs, &frame->mctx, 0)) 1095 if (save_user_regs(regs, &frame->mctx, 0))
837 goto badframe; 1096 goto badframe;
838 regs->link = current->thread.vdso_base + vdso32_sigtramp; 1097 regs->link = current->thread.vdso_base + vdso32_sigtramp;
839 } else { 1098 } else
1099#endif
1100 {
840 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn)) 1101 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
841 goto badframe; 1102 goto badframe;
842 regs->link = (unsigned long) frame->mctx.tramp; 1103 regs->link = (unsigned long) frame->mctx.tramp;
@@ -844,22 +1105,24 @@ static int handle_signal32(unsigned long sig, struct k_sigaction *ka,
844 1105
845 if (put_user(regs->gpr[1], (u32 __user *)newsp)) 1106 if (put_user(regs->gpr[1], (u32 __user *)newsp))
846 goto badframe; 1107 goto badframe;
847 regs->gpr[1] = (unsigned long) newsp; 1108 regs->gpr[1] = newsp;
848 regs->gpr[3] = sig; 1109 regs->gpr[3] = sig;
849 regs->gpr[4] = (unsigned long) sc; 1110 regs->gpr[4] = (unsigned long) sc;
850 regs->nip = (unsigned long) ka->sa.sa_handler; 1111 regs->nip = (unsigned long) ka->sa.sa_handler;
851 regs->trap = 0; 1112 regs->trap = 0;
1113#ifdef CONFIG_PPC64
852 regs->result = 0; 1114 regs->result = 0;
853 1115
854 if (test_thread_flag(TIF_SINGLESTEP)) 1116 if (test_thread_flag(TIF_SINGLESTEP))
855 ptrace_notify(SIGTRAP); 1117 ptrace_notify(SIGTRAP);
1118#endif
856 1119
857 return 1; 1120 return 1;
858 1121
859badframe: 1122badframe:
860#if DEBUG_SIG 1123#ifdef DEBUG_SIG
861 printk("badframe in handle_signal, regs=%p frame=%x newsp=%x\n", 1124 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
862 regs, frame, *newspp); 1125 regs, frame, newsp);
863#endif 1126#endif
864 force_sigsegv(sig, current); 1127 force_sigsegv(sig, current);
865 return 0; 1128 return 0;
@@ -868,65 +1131,69 @@ badframe:
868/* 1131/*
869 * Do a signal return; undo the signal stack. 1132 * Do a signal return; undo the signal stack.
870 */ 1133 */
871long sys32_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, 1134long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
872 struct pt_regs *regs) 1135 struct pt_regs *regs)
873{ 1136{
874 struct sigcontext32 __user *sc; 1137 struct sigcontext __user *sc;
875 struct sigcontext32 sigctx; 1138 struct sigcontext sigctx;
876 struct mcontext32 __user *sr; 1139 struct mcontext __user *sr;
877 sigset_t set; 1140 sigset_t set;
878 int ret;
879 1141
880 /* Always make any pending restarted system calls return -EINTR */ 1142 /* Always make any pending restarted system calls return -EINTR */
881 current_thread_info()->restart_block.fn = do_no_restart_syscall; 1143 current_thread_info()->restart_block.fn = do_no_restart_syscall;
882 1144
883 sc = (struct sigcontext32 __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE32); 1145 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
884 if (copy_from_user(&sigctx, sc, sizeof(sigctx))) 1146 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
885 goto badframe; 1147 goto badframe;
886 1148
1149#ifdef CONFIG_PPC64
887 /* 1150 /*
888 * Note that PPC32 puts the upper 32 bits of the sigmask in the 1151 * Note that PPC32 puts the upper 32 bits of the sigmask in the
889 * unused part of the signal stackframe 1152 * unused part of the signal stackframe
890 */ 1153 */
891 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); 1154 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1155#else
1156 set.sig[0] = sigctx.oldmask;
1157 set.sig[1] = sigctx._unused[3];
1158#endif
892 restore_sigmask(&set); 1159 restore_sigmask(&set);
893 1160
894 sr = (struct mcontext32 __user *)(u64)sigctx.regs; 1161 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
895 if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) 1162 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
896 || restore_user_regs(regs, sr, 1)) 1163 || restore_user_regs(regs, sr, 1))
897 goto badframe; 1164 goto badframe;
898 1165
899 ret = regs->result; 1166#ifdef CONFIG_PPC64
900 return ret; 1167 return (int)regs->result;
1168#else
1169 sigreturn_exit(regs); /* doesn't return */
1170 return 0;
1171#endif
901 1172
902badframe: 1173badframe:
903 force_sig(SIGSEGV, current); 1174 force_sig(SIGSEGV, current);
904 return 0; 1175 return 0;
905} 1176}
906 1177
907
908
909/*
910 * Start of do_signal32 routine
911 *
912 * This routine gets control when a pending signal needs to be processed
913 * in the 32 bit target thread -
914 *
915 * It handles both rt and non-rt signals
916 */
917
918/* 1178/*
919 * Note that 'init' is a special process: it doesn't get signals it doesn't 1179 * Note that 'init' is a special process: it doesn't get signals it doesn't
920 * want to handle. Thus you cannot kill init even with a SIGKILL even by 1180 * want to handle. Thus you cannot kill init even with a SIGKILL even by
921 * mistake. 1181 * mistake.
922 */ 1182 */
923 1183int do_signal(sigset_t *oldset, struct pt_regs *regs)
924int do_signal32(sigset_t *oldset, struct pt_regs *regs)
925{ 1184{
926 siginfo_t info; 1185 siginfo_t info;
1186 struct k_sigaction ka;
927 unsigned int frame, newsp; 1187 unsigned int frame, newsp;
928 int signr, ret; 1188 int signr, ret;
929 struct k_sigaction ka; 1189
1190#ifdef CONFIG_PPC32
1191 if (try_to_freeze()) {
1192 signr = 0;
1193 if (!signal_pending(current))
1194 goto no_signal;
1195 }
1196#endif
930 1197
931 if (!oldset) 1198 if (!oldset)
932 oldset = &current->blocked; 1199 oldset = &current->blocked;
@@ -934,7 +1201,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
934 newsp = frame = 0; 1201 newsp = frame = 0;
935 1202
936 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 1203 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
937 1204#ifdef CONFIG_PPC32
1205no_signal:
1206#endif
938 if (TRAP(regs) == 0x0C00 /* System Call! */ 1207 if (TRAP(regs) == 0x0C00 /* System Call! */
939 && regs->ccr & 0x10000000 /* error signalled */ 1208 && regs->ccr & 0x10000000 /* error signalled */
940 && ((ret = regs->gpr[3]) == ERESTARTSYS 1209 && ((ret = regs->gpr[3]) == ERESTARTSYS
@@ -964,12 +1233,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
964 return 0; /* no signals delivered */ 1233 return 0; /* no signals delivered */
965 1234
966 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size 1235 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
967 && (!on_sig_stack(regs->gpr[1]))) 1236 && !on_sig_stack(regs->gpr[1]))
968 newsp = (current->sas_ss_sp + current->sas_ss_size); 1237 newsp = current->sas_ss_sp + current->sas_ss_size;
969 else 1238 else
970 newsp = regs->gpr[1]; 1239 newsp = regs->gpr[1];
971 newsp &= ~0xfUL; 1240 newsp &= ~0xfUL;
972 1241
1242#ifdef CONFIG_PPC64
973 /* 1243 /*
974 * Reenable the DABR before delivering the signal to 1244 * Reenable the DABR before delivering the signal to
975 * user space. The DABR will have been cleared if it 1245 * user space. The DABR will have been cleared if it
@@ -977,12 +1247,13 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
977 */ 1247 */
978 if (current->thread.dabr) 1248 if (current->thread.dabr)
979 set_dabr(current->thread.dabr); 1249 set_dabr(current->thread.dabr);
1250#endif
980 1251
981 /* Whee! Actually deliver the signal. */ 1252 /* Whee! Actually deliver the signal. */
982 if (ka.sa.sa_flags & SA_SIGINFO) 1253 if (ka.sa.sa_flags & SA_SIGINFO)
983 ret = handle_rt_signal32(signr, &ka, &info, oldset, regs, newsp); 1254 ret = handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
984 else 1255 else
985 ret = handle_signal32(signr, &ka, &info, oldset, regs, newsp); 1256 ret = handle_signal(signr, &ka, &info, oldset, regs, newsp);
986 1257
987 if (ret) { 1258 if (ret) {
988 spin_lock_irq(&current->sighand->siglock); 1259 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index e93c13458910..a8210ed5c686 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -53,8 +53,7 @@
53#include <asm/time.h> 53#include <asm/time.h>
54#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h> 55#include <asm/systemcfg.h>
56 56#include <asm/ppc-pci.h>
57#include "pci.h"
58 57
59/* readdir & getdents */ 58/* readdir & getdents */
60#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) 59#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
@@ -114,96 +113,6 @@ out:
114 return error; 113 return error;
115} 114}
116 115
117struct linux_dirent32 {
118 u32 d_ino;
119 u32 d_off;
120 unsigned short d_reclen;
121 char d_name[1];
122};
123
124struct getdents_callback32 {
125 struct linux_dirent32 __user * current_dir;
126 struct linux_dirent32 __user * previous;
127 int count;
128 int error;
129};
130
131static int filldir(void * __buf, const char * name, int namlen, off_t offset,
132 ino_t ino, unsigned int d_type)
133{
134 struct linux_dirent32 __user * dirent;
135 struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
136 int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 2);
137
138 buf->error = -EINVAL; /* only used if we fail.. */
139 if (reclen > buf->count)
140 return -EINVAL;
141 dirent = buf->previous;
142 if (dirent) {
143 if (__put_user(offset, &dirent->d_off))
144 goto efault;
145 }
146 dirent = buf->current_dir;
147 if (__put_user(ino, &dirent->d_ino))
148 goto efault;
149 if (__put_user(reclen, &dirent->d_reclen))
150 goto efault;
151 if (copy_to_user(dirent->d_name, name, namlen))
152 goto efault;
153 if (__put_user(0, dirent->d_name + namlen))
154 goto efault;
155 if (__put_user(d_type, (char __user *) dirent + reclen - 1))
156 goto efault;
157 buf->previous = dirent;
158 dirent = (void __user *)dirent + reclen;
159 buf->current_dir = dirent;
160 buf->count -= reclen;
161 return 0;
162efault:
163 buf->error = -EFAULT;
164 return -EFAULT;
165}
166
167asmlinkage long sys32_getdents(unsigned int fd, struct linux_dirent32 __user *dirent,
168 unsigned int count)
169{
170 struct file * file;
171 struct linux_dirent32 __user * lastdirent;
172 struct getdents_callback32 buf;
173 int error;
174
175 error = -EFAULT;
176 if (!access_ok(VERIFY_WRITE, dirent, count))
177 goto out;
178
179 error = -EBADF;
180 file = fget(fd);
181 if (!file)
182 goto out;
183
184 buf.current_dir = dirent;
185 buf.previous = NULL;
186 buf.count = count;
187 buf.error = 0;
188
189 error = vfs_readdir(file, (filldir_t)filldir, &buf);
190 if (error < 0)
191 goto out_putf;
192 error = buf.error;
193 lastdirent = buf.previous;
194 if (lastdirent) {
195 if (put_user(file->f_pos, &lastdirent->d_off))
196 error = -EFAULT;
197 else
198 error = count - buf.count;
199 }
200
201out_putf:
202 fput(file);
203out:
204 return error;
205}
206
207asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp, 116asmlinkage long ppc32_select(u32 n, compat_ulong_t __user *inp,
208 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 117 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
209 compat_uptr_t tvp_x) 118 compat_uptr_t tvp_x)
@@ -248,7 +157,7 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
248 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 157 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
249 * and the register representation of a signed int (msr in 64-bit mode) is performed. 158 * and the register representation of a signed int (msr in 64-bit mode) is performed.
250 */ 159 */
251asmlinkage long sys32_sysfs(u32 option, u32 arg1, u32 arg2) 160asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2)
252{ 161{
253 return sys_sysfs((int)option, arg1, arg2); 162 return sys_sysfs((int)option, arg1, arg2);
254} 163}
@@ -270,7 +179,7 @@ struct timex32 {
270extern int do_adjtimex(struct timex *); 179extern int do_adjtimex(struct timex *);
271extern void ppc_adjtimex(void); 180extern void ppc_adjtimex(void);
272 181
273asmlinkage long sys32_adjtimex(struct timex32 __user *utp) 182asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
274{ 183{
275 struct timex txc; 184 struct timex txc;
276 int ret; 185 int ret;
@@ -329,7 +238,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
329 return ret; 238 return ret;
330} 239}
331 240
332asmlinkage long sys32_pause(void) 241asmlinkage long compat_sys_pause(void)
333{ 242{
334 current->state = TASK_INTERRUPTIBLE; 243 current->state = TASK_INTERRUPTIBLE;
335 schedule(); 244 schedule();
@@ -375,7 +284,7 @@ struct sysinfo32 {
375 char _f[20-2*sizeof(int)-sizeof(int)]; 284 char _f[20-2*sizeof(int)-sizeof(int)];
376}; 285};
377 286
378asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) 287asmlinkage long compat_sys_sysinfo(struct sysinfo32 __user *info)
379{ 288{
380 struct sysinfo s; 289 struct sysinfo s;
381 int ret, err; 290 int ret, err;
@@ -432,7 +341,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
432 sorts of things, like timeval and itimerval. */ 341 sorts of things, like timeval and itimerval. */
433extern struct timezone sys_tz; 342extern struct timezone sys_tz;
434 343
435asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 344asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
436{ 345{
437 if (tv) { 346 if (tv) {
438 struct timeval ktv; 347 struct timeval ktv;
@@ -450,7 +359,7 @@ asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct time
450 359
451 360
452 361
453asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz) 362asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
454{ 363{
455 struct timespec kts; 364 struct timespec kts;
456 struct timezone ktz; 365 struct timezone ktz;
@@ -468,7 +377,7 @@ asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct time
468} 377}
469 378
470#ifdef CONFIG_SYSVIPC 379#ifdef CONFIG_SYSVIPC
471long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, 380long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
472 u32 fifth) 381 u32 fifth)
473{ 382{
474 int version; 383 int version;
@@ -539,7 +448,7 @@ long sys32_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr,
539 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 448 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
540 * and the register representation of a signed int (msr in 64-bit mode) is performed. 449 * and the register representation of a signed int (msr in 64-bit mode) is performed.
541 */ 450 */
542asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count) 451asmlinkage long compat_sys_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offset, u32 count)
543{ 452{
544 mm_segment_t old_fs = get_fs(); 453 mm_segment_t old_fs = get_fs();
545 int ret; 454 int ret;
@@ -561,7 +470,7 @@ asmlinkage long sys32_sendfile(u32 out_fd, u32 in_fd, compat_off_t __user * offs
561 return ret; 470 return ret;
562} 471}
563 472
564asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count) 473asmlinkage int compat_sys_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
565{ 474{
566 mm_segment_t old_fs = get_fs(); 475 mm_segment_t old_fs = get_fs();
567 int ret; 476 int ret;
@@ -583,7 +492,7 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off
583 return ret; 492 return ret;
584} 493}
585 494
586long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2, 495long compat_sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
587 unsigned long a3, unsigned long a4, unsigned long a5, 496 unsigned long a3, unsigned long a4, unsigned long a5,
588 struct pt_regs *regs) 497 struct pt_regs *regs)
589{ 498{
@@ -610,58 +519,12 @@ out:
610 return error; 519 return error;
611} 520}
612 521
613/* Set up a thread for executing a new program. */
614void start_thread32(struct pt_regs* regs, unsigned long nip, unsigned long sp)
615{
616 set_fs(USER_DS);
617
618 /*
619 * If we exec out of a kernel thread then thread.regs will not be
620 * set. Do it now.
621 */
622 if (!current->thread.regs) {
623 unsigned long childregs = (unsigned long)current->thread_info +
624 THREAD_SIZE;
625 childregs -= sizeof(struct pt_regs);
626 current->thread.regs = (struct pt_regs *)childregs;
627 }
628
629 /*
630 * ELF_PLAT_INIT already clears all registers but it also sets r2.
631 * So just clear r2 here.
632 */
633 regs->gpr[2] = 0;
634
635 regs->nip = nip;
636 regs->gpr[1] = sp;
637 regs->msr = MSR_USER32;
638#ifndef CONFIG_SMP
639 if (last_task_used_math == current)
640 last_task_used_math = 0;
641#endif /* CONFIG_SMP */
642 current->thread.fpscr = 0;
643 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
644#ifdef CONFIG_ALTIVEC
645#ifndef CONFIG_SMP
646 if (last_task_used_altivec == current)
647 last_task_used_altivec = 0;
648#endif /* CONFIG_SMP */
649 memset(current->thread.vr, 0, sizeof(current->thread.vr));
650 current->thread.vscr.u[0] = 0;
651 current->thread.vscr.u[1] = 0;
652 current->thread.vscr.u[2] = 0;
653 current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
654 current->thread.vrsave = 0;
655 current->thread.used_vr = 0;
656#endif /* CONFIG_ALTIVEC */
657}
658
659/* Note: it is necessary to treat option as an unsigned int, 522/* Note: it is necessary to treat option as an unsigned int,
660 * with the corresponding cast to a signed int to insure that the 523 * with the corresponding cast to a signed int to insure that the
661 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 524 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
662 * and the register representation of a signed int (msr in 64-bit mode) is performed. 525 * and the register representation of a signed int (msr in 64-bit mode) is performed.
663 */ 526 */
664asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5) 527asmlinkage long compat_sys_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
665{ 528{
666 return sys_prctl((int)option, 529 return sys_prctl((int)option,
667 (unsigned long) arg2, 530 (unsigned long) arg2,
@@ -675,7 +538,7 @@ asmlinkage long sys32_prctl(u32 option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
675 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 538 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
676 * and the register representation of a signed int (msr in 64-bit mode) is performed. 539 * and the register representation of a signed int (msr in 64-bit mode) is performed.
677 */ 540 */
678asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval) 541asmlinkage long compat_sys_sched_rr_get_interval(u32 pid, struct compat_timespec __user *interval)
679{ 542{
680 struct timespec t; 543 struct timespec t;
681 int ret; 544 int ret;
@@ -690,7 +553,7 @@ asmlinkage long sys32_sched_rr_get_interval(u32 pid, struct compat_timespec __us
690 return ret; 553 return ret;
691} 554}
692 555
693asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 556asmlinkage int compat_sys_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
694{ 557{
695 return sys_pciconfig_read((unsigned long) bus, 558 return sys_pciconfig_read((unsigned long) bus,
696 (unsigned long) dfn, 559 (unsigned long) dfn,
@@ -699,7 +562,7 @@ asmlinkage int sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf
699 compat_ptr(ubuf)); 562 compat_ptr(ubuf));
700} 563}
701 564
702asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf) 565asmlinkage int compat_sys_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
703{ 566{
704 return sys_pciconfig_write((unsigned long) bus, 567 return sys_pciconfig_write((unsigned long) bus,
705 (unsigned long) dfn, 568 (unsigned long) dfn,
@@ -708,7 +571,7 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu
708 compat_ptr(ubuf)); 571 compat_ptr(ubuf));
709} 572}
710 573
711asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) 574asmlinkage int compat_sys_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
712{ 575{
713 return sys_pciconfig_iobase(which, in_bus, in_devfn); 576 return sys_pciconfig_iobase(which, in_bus, in_devfn);
714} 577}
@@ -719,7 +582,7 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn)
719 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 582 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
720 * and the register representation of a signed int (msr in 64-bit mode) is performed. 583 * and the register representation of a signed int (msr in 64-bit mode) is performed.
721 */ 584 */
722asmlinkage long sys32_access(const char __user * filename, u32 mode) 585asmlinkage long compat_sys_access(const char __user * filename, u32 mode)
723{ 586{
724 return sys_access(filename, (int)mode); 587 return sys_access(filename, (int)mode);
725} 588}
@@ -730,7 +593,7 @@ asmlinkage long sys32_access(const char __user * filename, u32 mode)
730 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 593 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
731 * and the register representation of a signed int (msr in 64-bit mode) is performed. 594 * and the register representation of a signed int (msr in 64-bit mode) is performed.
732 */ 595 */
733asmlinkage long sys32_creat(const char __user * pathname, u32 mode) 596asmlinkage long compat_sys_creat(const char __user * pathname, u32 mode)
734{ 597{
735 return sys_creat(pathname, (int)mode); 598 return sys_creat(pathname, (int)mode);
736} 599}
@@ -741,7 +604,7 @@ asmlinkage long sys32_creat(const char __user * pathname, u32 mode)
741 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 604 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
742 * and the register representation of a signed int (msr in 64-bit mode) is performed. 605 * and the register representation of a signed int (msr in 64-bit mode) is performed.
743 */ 606 */
744asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options) 607asmlinkage long compat_sys_waitpid(u32 pid, unsigned int __user * stat_addr, u32 options)
745{ 608{
746 return sys_waitpid((int)pid, stat_addr, (int)options); 609 return sys_waitpid((int)pid, stat_addr, (int)options);
747} 610}
@@ -752,7 +615,7 @@ asmlinkage long sys32_waitpid(u32 pid, unsigned int __user * stat_addr, u32 opti
752 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 615 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
753 * and the register representation of a signed int (msr in 64-bit mode) is performed. 616 * and the register representation of a signed int (msr in 64-bit mode) is performed.
754 */ 617 */
755asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist) 618asmlinkage long compat_sys_getgroups(u32 gidsetsize, gid_t __user *grouplist)
756{ 619{
757 return sys_getgroups((int)gidsetsize, grouplist); 620 return sys_getgroups((int)gidsetsize, grouplist);
758} 621}
@@ -763,7 +626,7 @@ asmlinkage long sys32_getgroups(u32 gidsetsize, gid_t __user *grouplist)
763 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 626 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
764 * and the register representation of a signed int (msr in 64-bit mode) is performed. 627 * and the register representation of a signed int (msr in 64-bit mode) is performed.
765 */ 628 */
766asmlinkage long sys32_getpgid(u32 pid) 629asmlinkage long compat_sys_getpgid(u32 pid)
767{ 630{
768 return sys_getpgid((int)pid); 631 return sys_getpgid((int)pid);
769} 632}
@@ -775,7 +638,7 @@ asmlinkage long sys32_getpgid(u32 pid)
775 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 638 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
776 * and the register representation of a signed int (msr in 64-bit mode) is performed. 639 * and the register representation of a signed int (msr in 64-bit mode) is performed.
777 */ 640 */
778asmlinkage long sys32_getsid(u32 pid) 641asmlinkage long compat_sys_getsid(u32 pid)
779{ 642{
780 return sys_getsid((int)pid); 643 return sys_getsid((int)pid);
781} 644}
@@ -786,7 +649,7 @@ asmlinkage long sys32_getsid(u32 pid)
786 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 649 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
787 * and the register representation of a signed int (msr in 64-bit mode) is performed. 650 * and the register representation of a signed int (msr in 64-bit mode) is performed.
788 */ 651 */
789asmlinkage long sys32_kill(u32 pid, u32 sig) 652asmlinkage long compat_sys_kill(u32 pid, u32 sig)
790{ 653{
791 return sys_kill((int)pid, (int)sig); 654 return sys_kill((int)pid, (int)sig);
792} 655}
@@ -797,12 +660,12 @@ asmlinkage long sys32_kill(u32 pid, u32 sig)
797 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 660 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
798 * and the register representation of a signed int (msr in 64-bit mode) is performed. 661 * and the register representation of a signed int (msr in 64-bit mode) is performed.
799 */ 662 */
800asmlinkage long sys32_mkdir(const char __user * pathname, u32 mode) 663asmlinkage long compat_sys_mkdir(const char __user * pathname, u32 mode)
801{ 664{
802 return sys_mkdir(pathname, (int)mode); 665 return sys_mkdir(pathname, (int)mode);
803} 666}
804 667
805long sys32_nice(u32 increment) 668long compat_sys_nice(u32 increment)
806{ 669{
807 /* sign extend increment */ 670 /* sign extend increment */
808 return sys_nice((int)increment); 671 return sys_nice((int)increment);
@@ -819,7 +682,7 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
819 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 682 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
820 * and the register representation of a signed int (msr in 64-bit mode) is performed. 683 * and the register representation of a signed int (msr in 64-bit mode) is performed.
821 */ 684 */
822asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32 bufsiz) 685asmlinkage long compat_sys_readlink(const char __user * path, char __user * buf, u32 bufsiz)
823{ 686{
824 return sys_readlink(path, buf, (int)bufsiz); 687 return sys_readlink(path, buf, (int)bufsiz);
825} 688}
@@ -829,7 +692,7 @@ asmlinkage long sys32_readlink(const char __user * path, char __user * buf, u32
829 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 692 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
830 * and the register representation of a signed int (msr in 64-bit mode) is performed. 693 * and the register representation of a signed int (msr in 64-bit mode) is performed.
831 */ 694 */
832asmlinkage long sys32_sched_get_priority_max(u32 policy) 695asmlinkage long compat_sys_sched_get_priority_max(u32 policy)
833{ 696{
834 return sys_sched_get_priority_max((int)policy); 697 return sys_sched_get_priority_max((int)policy);
835} 698}
@@ -840,7 +703,7 @@ asmlinkage long sys32_sched_get_priority_max(u32 policy)
840 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 703 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
841 * and the register representation of a signed int (msr in 64-bit mode) is performed. 704 * and the register representation of a signed int (msr in 64-bit mode) is performed.
842 */ 705 */
843asmlinkage long sys32_sched_get_priority_min(u32 policy) 706asmlinkage long compat_sys_sched_get_priority_min(u32 policy)
844{ 707{
845 return sys_sched_get_priority_min((int)policy); 708 return sys_sched_get_priority_min((int)policy);
846} 709}
@@ -851,7 +714,7 @@ asmlinkage long sys32_sched_get_priority_min(u32 policy)
851 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 714 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
852 * and the register representation of a signed int (msr in 64-bit mode) is performed. 715 * and the register representation of a signed int (msr in 64-bit mode) is performed.
853 */ 716 */
854asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param) 717asmlinkage long compat_sys_sched_getparam(u32 pid, struct sched_param __user *param)
855{ 718{
856 return sys_sched_getparam((int)pid, param); 719 return sys_sched_getparam((int)pid, param);
857} 720}
@@ -862,7 +725,7 @@ asmlinkage long sys32_sched_getparam(u32 pid, struct sched_param __user *param)
862 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 725 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
863 * and the register representation of a signed int (msr in 64-bit mode) is performed. 726 * and the register representation of a signed int (msr in 64-bit mode) is performed.
864 */ 727 */
865asmlinkage long sys32_sched_getscheduler(u32 pid) 728asmlinkage long compat_sys_sched_getscheduler(u32 pid)
866{ 729{
867 return sys_sched_getscheduler((int)pid); 730 return sys_sched_getscheduler((int)pid);
868} 731}
@@ -873,7 +736,7 @@ asmlinkage long sys32_sched_getscheduler(u32 pid)
873 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 736 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
874 * and the register representation of a signed int (msr in 64-bit mode) is performed. 737 * and the register representation of a signed int (msr in 64-bit mode) is performed.
875 */ 738 */
876asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param) 739asmlinkage long compat_sys_sched_setparam(u32 pid, struct sched_param __user *param)
877{ 740{
878 return sys_sched_setparam((int)pid, param); 741 return sys_sched_setparam((int)pid, param);
879} 742}
@@ -884,7 +747,7 @@ asmlinkage long sys32_sched_setparam(u32 pid, struct sched_param __user *param)
884 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 747 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
885 * and the register representation of a signed int (msr in 64-bit mode) is performed. 748 * and the register representation of a signed int (msr in 64-bit mode) is performed.
886 */ 749 */
887asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param) 750asmlinkage long compat_sys_sched_setscheduler(u32 pid, u32 policy, struct sched_param __user *param)
888{ 751{
889 return sys_sched_setscheduler((int)pid, (int)policy, param); 752 return sys_sched_setscheduler((int)pid, (int)policy, param);
890} 753}
@@ -895,7 +758,7 @@ asmlinkage long sys32_sched_setscheduler(u32 pid, u32 policy, struct sched_param
895 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 758 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
896 * and the register representation of a signed int (msr in 64-bit mode) is performed. 759 * and the register representation of a signed int (msr in 64-bit mode) is performed.
897 */ 760 */
898asmlinkage long sys32_setdomainname(char __user *name, u32 len) 761asmlinkage long compat_sys_setdomainname(char __user *name, u32 len)
899{ 762{
900 return sys_setdomainname(name, (int)len); 763 return sys_setdomainname(name, (int)len);
901} 764}
@@ -906,13 +769,13 @@ asmlinkage long sys32_setdomainname(char __user *name, u32 len)
906 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 769 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
907 * and the register representation of a signed int (msr in 64-bit mode) is performed. 770 * and the register representation of a signed int (msr in 64-bit mode) is performed.
908 */ 771 */
909asmlinkage long sys32_setgroups(u32 gidsetsize, gid_t __user *grouplist) 772asmlinkage long compat_sys_setgroups(u32 gidsetsize, gid_t __user *grouplist)
910{ 773{
911 return sys_setgroups((int)gidsetsize, grouplist); 774 return sys_setgroups((int)gidsetsize, grouplist);
912} 775}
913 776
914 777
915asmlinkage long sys32_sethostname(char __user *name, u32 len) 778asmlinkage long compat_sys_sethostname(char __user *name, u32 len)
916{ 779{
917 /* sign extend len */ 780 /* sign extend len */
918 return sys_sethostname(name, (int)len); 781 return sys_sethostname(name, (int)len);
@@ -924,30 +787,30 @@ asmlinkage long sys32_sethostname(char __user *name, u32 len)
924 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 787 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
925 * and the register representation of a signed int (msr in 64-bit mode) is performed. 788 * and the register representation of a signed int (msr in 64-bit mode) is performed.
926 */ 789 */
927asmlinkage long sys32_setpgid(u32 pid, u32 pgid) 790asmlinkage long compat_sys_setpgid(u32 pid, u32 pgid)
928{ 791{
929 return sys_setpgid((int)pid, (int)pgid); 792 return sys_setpgid((int)pid, (int)pgid);
930} 793}
931 794
932long sys32_getpriority(u32 which, u32 who) 795long compat_sys_getpriority(u32 which, u32 who)
933{ 796{
934 /* sign extend which and who */ 797 /* sign extend which and who */
935 return sys_getpriority((int)which, (int)who); 798 return sys_getpriority((int)which, (int)who);
936} 799}
937 800
938long sys32_setpriority(u32 which, u32 who, u32 niceval) 801long compat_sys_setpriority(u32 which, u32 who, u32 niceval)
939{ 802{
940 /* sign extend which, who and niceval */ 803 /* sign extend which, who and niceval */
941 return sys_setpriority((int)which, (int)who, (int)niceval); 804 return sys_setpriority((int)which, (int)who, (int)niceval);
942} 805}
943 806
944long sys32_ioprio_get(u32 which, u32 who) 807long compat_sys_ioprio_get(u32 which, u32 who)
945{ 808{
946 /* sign extend which and who */ 809 /* sign extend which and who */
947 return sys_ioprio_get((int)which, (int)who); 810 return sys_ioprio_get((int)which, (int)who);
948} 811}
949 812
950long sys32_ioprio_set(u32 which, u32 who, u32 ioprio) 813long compat_sys_ioprio_set(u32 which, u32 who, u32 ioprio)
951{ 814{
952 /* sign extend which, who and ioprio */ 815 /* sign extend which, who and ioprio */
953 return sys_ioprio_set((int)which, (int)who, (int)ioprio); 816 return sys_ioprio_set((int)which, (int)who, (int)ioprio);
@@ -958,12 +821,12 @@ long sys32_ioprio_set(u32 which, u32 who, u32 ioprio)
958 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 821 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
959 * and the register representation of a signed int (msr in 64-bit mode) is performed. 822 * and the register representation of a signed int (msr in 64-bit mode) is performed.
960 */ 823 */
961asmlinkage long sys32_ssetmask(u32 newmask) 824asmlinkage long compat_sys_ssetmask(u32 newmask)
962{ 825{
963 return sys_ssetmask((int) newmask); 826 return sys_ssetmask((int) newmask);
964} 827}
965 828
966asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len) 829asmlinkage long compat_sys_syslog(u32 type, char __user * buf, u32 len)
967{ 830{
968 /* sign extend len */ 831 /* sign extend len */
969 return sys_syslog(type, buf, (int)len); 832 return sys_syslog(type, buf, (int)len);
@@ -975,7 +838,7 @@ asmlinkage long sys32_syslog(u32 type, char __user * buf, u32 len)
975 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 838 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
976 * and the register representation of a signed int (msr in 64-bit mode) is performed. 839 * and the register representation of a signed int (msr in 64-bit mode) is performed.
977 */ 840 */
978asmlinkage long sys32_umask(u32 mask) 841asmlinkage long compat_sys_umask(u32 mask)
979{ 842{
980 return sys_umask((int)mask); 843 return sys_umask((int)mask);
981} 844}
@@ -991,7 +854,7 @@ struct __sysctl_args32 {
991 u32 __unused[4]; 854 u32 __unused[4];
992}; 855};
993 856
994asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) 857asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
995{ 858{
996 struct __sysctl_args32 tmp; 859 struct __sysctl_args32 tmp;
997 int error; 860 int error;
@@ -1032,55 +895,7 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
1032} 895}
1033#endif 896#endif
1034 897
1035asmlinkage int sys32_uname(struct old_utsname __user * name) 898unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
1036{
1037 int err = 0;
1038
1039 down_read(&uts_sem);
1040 if (copy_to_user(name, &system_utsname, sizeof(*name)))
1041 err = -EFAULT;
1042 up_read(&uts_sem);
1043 if (!err && personality(current->personality) == PER_LINUX32) {
1044 /* change "ppc64" to "ppc" */
1045 if (__put_user(0, name->machine + 3)
1046 || __put_user(0, name->machine + 4))
1047 err = -EFAULT;
1048 }
1049 return err;
1050}
1051
1052asmlinkage int sys32_olduname(struct oldold_utsname __user * name)
1053{
1054 int error;
1055
1056 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
1057 return -EFAULT;
1058
1059 down_read(&uts_sem);
1060 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
1061 error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
1062 error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
1063 error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
1064 error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
1065 error |= __put_user(0,name->release+__OLD_UTS_LEN);
1066 error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
1067 error |= __put_user(0,name->version+__OLD_UTS_LEN);
1068 error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
1069 error |= __put_user(0,name->machine+__OLD_UTS_LEN);
1070 if (personality(current->personality) == PER_LINUX32) {
1071 /* change "ppc64" to "ppc" */
1072 error |= __put_user(0, name->machine + 3);
1073 error |= __put_user(0, name->machine + 4);
1074 }
1075
1076 up_read(&uts_sem);
1077
1078 error = error ? -EFAULT : 0;
1079
1080 return error;
1081}
1082
1083unsigned long sys32_mmap2(unsigned long addr, size_t len,
1084 unsigned long prot, unsigned long flags, 899 unsigned long prot, unsigned long flags,
1085 unsigned long fd, unsigned long pgoff) 900 unsigned long fd, unsigned long pgoff)
1086{ 901{
@@ -1088,29 +903,7 @@ unsigned long sys32_mmap2(unsigned long addr, size_t len,
1088 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12); 903 return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
1089} 904}
1090 905
1091int get_compat_timeval(struct timeval *tv, struct compat_timeval __user *ctv) 906long compat_sys_tgkill(u32 tgid, u32 pid, int sig)
1092{
1093 return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
1094 __get_user(tv->tv_sec, &ctv->tv_sec) ||
1095 __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
1096}
1097
1098asmlinkage long sys32_utimes(char __user *filename, struct compat_timeval __user *tvs)
1099{
1100 struct timeval ktvs[2], *ptr;
1101
1102 ptr = NULL;
1103 if (tvs) {
1104 if (get_compat_timeval(&ktvs[0], &tvs[0]) ||
1105 get_compat_timeval(&ktvs[1], &tvs[1]))
1106 return -EFAULT;
1107 ptr = ktvs;
1108 }
1109
1110 return do_utimes(filename, ptr);
1111}
1112
1113long sys32_tgkill(u32 tgid, u32 pid, int sig)
1114{ 907{
1115 /* sign extend tgid, pid */ 908 /* sign extend tgid, pid */
1116 return sys_tgkill((int)tgid, (int)pid, sig); 909 return sys_tgkill((int)tgid, (int)pid, sig);
@@ -1121,30 +914,30 @@ long sys32_tgkill(u32 tgid, u32 pid, int sig)
1121 * The 32 bit ABI passes long longs in an odd even register pair. 914 * The 32 bit ABI passes long longs in an odd even register pair.
1122 */ 915 */
1123 916
1124compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, 917compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
1125 u32 reg6, u32 poshi, u32 poslo) 918 u32 reg6, u32 poshi, u32 poslo)
1126{ 919{
1127 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 920 return sys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1128} 921}
1129 922
1130compat_ssize_t sys32_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count, 923compat_ssize_t compat_sys_pwrite64(unsigned int fd, char __user *ubuf, compat_size_t count,
1131 u32 reg6, u32 poshi, u32 poslo) 924 u32 reg6, u32 poshi, u32 poslo)
1132{ 925{
1133 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo); 926 return sys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
1134} 927}
1135 928
1136compat_ssize_t sys32_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count) 929compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
1137{ 930{
1138 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count); 931 return sys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
1139} 932}
1140 933
1141asmlinkage int sys32_truncate64(const char __user * path, u32 reg4, 934asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
1142 unsigned long high, unsigned long low) 935 unsigned long high, unsigned long low)
1143{ 936{
1144 return sys_truncate(path, (high << 32) | low); 937 return sys_truncate(path, (high << 32) | low);
1145} 938}
1146 939
1147asmlinkage int sys32_ftruncate64(unsigned int fd, u32 reg4, unsigned long high, 940asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
1148 unsigned long low) 941 unsigned long low)
1149{ 942{
1150 return sys_ftruncate(fd, (high << 32) | low); 943 return sys_ftruncate(fd, (high << 32) | low);
@@ -1164,13 +957,6 @@ long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
1164 advice); 957 advice);
1165} 958}
1166 959
1167long ppc32_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
1168 u32 len_high, u32 len_low)
1169{
1170 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
1171 (u64)len_high << 32 | len_low, advice);
1172}
1173
1174long ppc32_timer_create(clockid_t clock, 960long ppc32_timer_create(clockid_t clock,
1175 struct compat_sigevent __user *ev32, 961 struct compat_sigevent __user *ev32,
1176 timer_t __user *timer_id) 962 timer_t __user *timer_id)
@@ -1203,7 +989,7 @@ long ppc32_timer_create(clockid_t clock,
1203 return err; 989 return err;
1204} 990}
1205 991
1206asmlinkage long sys32_add_key(const char __user *_type, 992asmlinkage long compat_sys_add_key(const char __user *_type,
1207 const char __user *_description, 993 const char __user *_description,
1208 const void __user *_payload, 994 const void __user *_payload,
1209 u32 plen, 995 u32 plen,
@@ -1212,7 +998,7 @@ asmlinkage long sys32_add_key(const char __user *_type,
1212 return sys_add_key(_type, _description, _payload, plen, ringid); 998 return sys_add_key(_type, _description, _payload, plen, ringid);
1213} 999}
1214 1000
1215asmlinkage long sys32_request_key(const char __user *_type, 1001asmlinkage long compat_sys_request_key(const char __user *_type,
1216 const char __user *_description, 1002 const char __user *_description,
1217 const char __user *_callout_info, 1003 const char __user *_callout_info,
1218 u32 destringid) 1004 u32 destringid)
diff --git a/arch/ppc64/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 05f16633bd2c..f72ced11212d 100644
--- a/arch/ppc64/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * linux/arch/ppc64/kernel/sys_ppc.c 2 * Implementation of various system calls for Linux/PowerPC
3 * 3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 5 *
7 * Derived from "arch/i386/kernel/sys_i386.c" 6 * Derived from "arch/i386/kernel/sys_i386.c"
@@ -52,9 +51,8 @@ extern unsigned long wall_jiffies;
52 * 51 *
53 * This is really horribly ugly. 52 * This is really horribly ugly.
54 */ 53 */
55asmlinkage int 54int sys_ipc(uint call, int first, unsigned long second, long third,
56sys_ipc (uint call, int first, unsigned long second, long third, 55 void __user *ptr, long fifth)
57 void __user *ptr, long fifth)
58{ 56{
59 int version, ret; 57 int version, ret;
60 58
@@ -88,7 +86,7 @@ sys_ipc (uint call, int first, unsigned long second, long third,
88 } 86 }
89 case MSGSND: 87 case MSGSND:
90 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr, 88 ret = sys_msgsnd(first, (struct msgbuf __user *)ptr,
91 (size_t)second, third); 89 (size_t)second, third);
92 break; 90 break;
93 case MSGRCV: 91 case MSGRCV:
94 switch (version) { 92 switch (version) {
@@ -113,41 +111,29 @@ sys_ipc (uint call, int first, unsigned long second, long third,
113 } 111 }
114 break; 112 break;
115 case MSGGET: 113 case MSGGET:
116 ret = sys_msgget ((key_t)first, (int)second); 114 ret = sys_msgget((key_t)first, (int)second);
117 break; 115 break;
118 case MSGCTL: 116 case MSGCTL:
119 ret = sys_msgctl(first, (int)second, 117 ret = sys_msgctl(first, (int)second,
120 (struct msqid_ds __user *)ptr); 118 (struct msqid_ds __user *)ptr);
121 break; 119 break;
122 case SHMAT: 120 case SHMAT: {
123 switch (version) { 121 ulong raddr;
124 default: { 122 ret = do_shmat(first, (char __user *)ptr, (int)second, &raddr);
125 ulong raddr; 123 if (ret)
126 ret = do_shmat(first, (char __user *) ptr,
127 (int)second, &raddr);
128 if (ret)
129 break;
130 ret = put_user (raddr, (ulong __user *) third);
131 break;
132 }
133 case 1: /* iBCS2 emulator entry point */
134 ret = -EINVAL;
135 if (!segment_eq(get_fs(), get_ds()))
136 break;
137 ret = do_shmat(first, (char __user *)ptr,
138 (int)second, (ulong *)third);
139 break; 124 break;
140 } 125 ret = put_user(raddr, (ulong __user *) third);
141 break; 126 break;
142 case SHMDT: 127 }
143 ret = sys_shmdt ((char __user *)ptr); 128 case SHMDT:
129 ret = sys_shmdt((char __user *)ptr);
144 break; 130 break;
145 case SHMGET: 131 case SHMGET:
146 ret = sys_shmget (first, (size_t)second, third); 132 ret = sys_shmget(first, (size_t)second, third);
147 break; 133 break;
148 case SHMCTL: 134 case SHMCTL:
149 ret = sys_shmctl(first, (int)second, 135 ret = sys_shmctl(first, (int)second,
150 (struct shmid_ds __user *)ptr); 136 (struct shmid_ds __user *)ptr);
151 break; 137 break;
152 } 138 }
153 139
@@ -158,43 +144,89 @@ sys_ipc (uint call, int first, unsigned long second, long third,
158 * sys_pipe() is the normal C calling standard for creating 144 * sys_pipe() is the normal C calling standard for creating
159 * a pipe. It's not the way unix traditionally does this, though. 145 * a pipe. It's not the way unix traditionally does this, though.
160 */ 146 */
161asmlinkage int sys_pipe(int __user *fildes) 147int sys_pipe(int __user *fildes)
162{ 148{
163 int fd[2]; 149 int fd[2];
164 int error; 150 int error;
165 151
166 error = do_pipe(fd); 152 error = do_pipe(fd);
167 if (!error) { 153 if (!error) {
168 if (copy_to_user(fildes, fd, 2*sizeof(int))) 154 if (copy_to_user(fildes, fd, 2*sizeof(int)))
169 error = -EFAULT; 155 error = -EFAULT;
170 } 156 }
171
172 return error; 157 return error;
173} 158}
174 159
175unsigned long sys_mmap(unsigned long addr, size_t len, 160static inline unsigned long do_mmap2(unsigned long addr, size_t len,
176 unsigned long prot, unsigned long flags, 161 unsigned long prot, unsigned long flags,
177 unsigned long fd, off_t offset) 162 unsigned long fd, unsigned long off, int shift)
178{ 163{
179 struct file * file = NULL; 164 struct file * file = NULL;
180 unsigned long ret = -EBADF; 165 unsigned long ret = -EINVAL;
181 166
167 if (shift) {
168 if (off & ((1 << shift) - 1))
169 goto out;
170 off >>= shift;
171 }
172
173 ret = -EBADF;
182 if (!(flags & MAP_ANONYMOUS)) { 174 if (!(flags & MAP_ANONYMOUS)) {
183 if (!(file = fget(fd))) 175 if (!(file = fget(fd)))
184 goto out; 176 goto out;
185 } 177 }
186 178
187 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 179 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
180
188 down_write(&current->mm->mmap_sem); 181 down_write(&current->mm->mmap_sem);
189 ret = do_mmap(file, addr, len, prot, flags, offset); 182 ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
190 up_write(&current->mm->mmap_sem); 183 up_write(&current->mm->mmap_sem);
191 if (file) 184 if (file)
192 fput(file); 185 fput(file);
193
194out: 186out:
195 return ret; 187 return ret;
196} 188}
197 189
190unsigned long sys_mmap2(unsigned long addr, size_t len,
191 unsigned long prot, unsigned long flags,
192 unsigned long fd, unsigned long pgoff)
193{
194 return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
195}
196
197unsigned long sys_mmap(unsigned long addr, size_t len,
198 unsigned long prot, unsigned long flags,
199 unsigned long fd, off_t offset)
200{
201 return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
202}
203
204#ifdef CONFIG_PPC32
205/*
206 * Due to some executables calling the wrong select we sometimes
207 * get wrong args. This determines how the args are being passed
208 * (a single ptr to them all args passed) then calls
209 * sys_select() with the appropriate args. -- Cort
210 */
211int
212ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
213{
214 if ( (unsigned long)n >= 4096 )
215 {
216 unsigned long __user *buffer = (unsigned long __user *)n;
217 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
218 || __get_user(n, buffer)
219 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
220 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
221 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
222 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
223 return -EFAULT;
224 }
225 return sys_select(n, inp, outp, exp, tvp);
226}
227#endif
228
229#ifdef CONFIG_PPC64
198long ppc64_personality(unsigned long personality) 230long ppc64_personality(unsigned long personality)
199{ 231{
200 long ret; 232 long ret;
@@ -207,8 +239,25 @@ long ppc64_personality(unsigned long personality)
207 ret = PER_LINUX; 239 ret = PER_LINUX;
208 return ret; 240 return ret;
209} 241}
242#endif
243
244#ifdef CONFIG_PPC64
245#define OVERRIDE_MACHINE (personality(current->personality) == PER_LINUX32)
246#else
247#define OVERRIDE_MACHINE 0
248#endif
249
250static inline int override_machine(char *mach)
251{
252 if (OVERRIDE_MACHINE) {
253 /* change ppc64 to ppc */
254 if (__put_user(0, mach+3) || __put_user(0, mach+4))
255 return -EFAULT;
256 }
257 return 0;
258}
210 259
211long ppc64_newuname(struct new_utsname __user * name) 260long ppc_newuname(struct new_utsname __user * name)
212{ 261{
213 int err = 0; 262 int err = 0;
214 263
@@ -216,16 +265,54 @@ long ppc64_newuname(struct new_utsname __user * name)
216 if (copy_to_user(name, &system_utsname, sizeof(*name))) 265 if (copy_to_user(name, &system_utsname, sizeof(*name)))
217 err = -EFAULT; 266 err = -EFAULT;
218 up_read(&uts_sem); 267 up_read(&uts_sem);
219 if (!err && personality(current->personality) == PER_LINUX32) { 268 if (!err)
220 /* change ppc64 to ppc */ 269 err = override_machine(name->machine);
221 if (__put_user(0, name->machine + 3)
222 || __put_user(0, name->machine + 4))
223 err = -EFAULT;
224 }
225 return err; 270 return err;
226} 271}
227 272
228asmlinkage time_t sys64_time(time_t __user * tloc) 273int sys_uname(struct old_utsname __user *name)
274{
275 int err = 0;
276
277 down_read(&uts_sem);
278 if (copy_to_user(name, &system_utsname, sizeof(*name)))
279 err = -EFAULT;
280 up_read(&uts_sem);
281 if (!err)
282 err = override_machine(name->machine);
283 return err;
284}
285
286int sys_olduname(struct oldold_utsname __user *name)
287{
288 int error;
289
290 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
291 return -EFAULT;
292
293 down_read(&uts_sem);
294 error = __copy_to_user(&name->sysname, &system_utsname.sysname,
295 __OLD_UTS_LEN);
296 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
297 error |= __copy_to_user(&name->nodename, &system_utsname.nodename,
298 __OLD_UTS_LEN);
299 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
300 error |= __copy_to_user(&name->release, &system_utsname.release,
301 __OLD_UTS_LEN);
302 error |= __put_user(0, name->release + __OLD_UTS_LEN);
303 error |= __copy_to_user(&name->version, &system_utsname.version,
304 __OLD_UTS_LEN);
305 error |= __put_user(0, name->version + __OLD_UTS_LEN);
306 error |= __copy_to_user(&name->machine, &system_utsname.machine,
307 __OLD_UTS_LEN);
308 error |= override_machine(name->machine);
309 up_read(&uts_sem);
310
311 return error? -EFAULT: 0;
312}
313
314#ifdef CONFIG_PPC64
315time_t sys64_time(time_t __user * tloc)
229{ 316{
230 time_t secs; 317 time_t secs;
231 time_t usecs; 318 time_t usecs;
@@ -247,6 +334,14 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
247 334
248 return secs; 335 return secs;
249} 336}
337#endif
338
339long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
340 u32 len_high, u32 len_low)
341{
342 return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
343 (u64)len_high << 32 | len_low, advice);
344}
250 345
251void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5, 346void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
252 unsigned long r6, unsigned long r7, unsigned long r8, 347 unsigned long r6, unsigned long r7, unsigned long r8,
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
new file mode 100644
index 000000000000..65eaea91b499
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.S
@@ -0,0 +1,321 @@
1/*
2 * This file contains the table of syscall-handling functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/config.h>
18#include <asm/ppc_asm.h>
19
20#ifdef CONFIG_PPC64
21#define SYSCALL(func) .llong .sys_##func,.sys_##func
22#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func
23#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func
24#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall
25#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func
26#define SYSX(f, f3264, f32) .llong .f,.f3264
27#else
28#define SYSCALL(func) .long sys_##func
29#define COMPAT_SYS(func) .long sys_##func
30#define PPC_SYS(func) .long ppc_##func
31#define OLDSYS(func) .long sys_##func
32#define SYS32ONLY(func) .long sys_##func
33#define SYSX(f, f3264, f32) .long f32
34#endif
35
36#ifdef CONFIG_PPC64
37#define sys_sigpending sys_ni_syscall
38#define sys_old_getrlimit sys_ni_syscall
39#else
40#define ppc_rtas sys_ni_syscall
41#endif
42
43_GLOBAL(sys_call_table)
44SYSCALL(restart_syscall)
45SYSCALL(exit)
46PPC_SYS(fork)
47SYSCALL(read)
48SYSCALL(write)
49COMPAT_SYS(open)
50SYSCALL(close)
51COMPAT_SYS(waitpid)
52COMPAT_SYS(creat)
53SYSCALL(link)
54SYSCALL(unlink)
55COMPAT_SYS(execve)
56SYSCALL(chdir)
57SYSX(sys64_time,compat_sys_time,sys_time)
58SYSCALL(mknod)
59SYSCALL(chmod)
60SYSCALL(lchown)
61SYSCALL(ni_syscall)
62OLDSYS(stat)
63SYSX(sys_lseek,ppc32_lseek,sys_lseek)
64SYSCALL(getpid)
65COMPAT_SYS(mount)
66SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
67SYSCALL(setuid)
68SYSCALL(getuid)
69COMPAT_SYS(stime)
70COMPAT_SYS(ptrace)
71SYSCALL(alarm)
72OLDSYS(fstat)
73COMPAT_SYS(pause)
74COMPAT_SYS(utime)
75SYSCALL(ni_syscall)
76SYSCALL(ni_syscall)
77COMPAT_SYS(access)
78COMPAT_SYS(nice)
79SYSCALL(ni_syscall)
80SYSCALL(sync)
81COMPAT_SYS(kill)
82SYSCALL(rename)
83COMPAT_SYS(mkdir)
84SYSCALL(rmdir)
85SYSCALL(dup)
86SYSCALL(pipe)
87COMPAT_SYS(times)
88SYSCALL(ni_syscall)
89SYSCALL(brk)
90SYSCALL(setgid)
91SYSCALL(getgid)
92SYSCALL(signal)
93SYSCALL(geteuid)
94SYSCALL(getegid)
95SYSCALL(acct)
96SYSCALL(umount)
97SYSCALL(ni_syscall)
98COMPAT_SYS(ioctl)
99COMPAT_SYS(fcntl)
100SYSCALL(ni_syscall)
101COMPAT_SYS(setpgid)
102SYSCALL(ni_syscall)
103SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
104COMPAT_SYS(umask)
105SYSCALL(chroot)
106SYSCALL(ustat)
107SYSCALL(dup2)
108SYSCALL(getppid)
109SYSCALL(getpgrp)
110SYSCALL(setsid)
111SYS32ONLY(sigaction)
112SYSCALL(sgetmask)
113COMPAT_SYS(ssetmask)
114SYSCALL(setreuid)
115SYSCALL(setregid)
116SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend)
117COMPAT_SYS(sigpending)
118COMPAT_SYS(sethostname)
119COMPAT_SYS(setrlimit)
120COMPAT_SYS(old_getrlimit)
121COMPAT_SYS(getrusage)
122COMPAT_SYS(gettimeofday)
123COMPAT_SYS(settimeofday)
124COMPAT_SYS(getgroups)
125COMPAT_SYS(setgroups)
126SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
127SYSCALL(symlink)
128OLDSYS(lstat)
129COMPAT_SYS(readlink)
130SYSCALL(uselib)
131SYSCALL(swapon)
132SYSCALL(reboot)
133SYSX(sys_ni_syscall,old32_readdir,old_readdir)
134SYSCALL(mmap)
135SYSCALL(munmap)
136SYSCALL(truncate)
137SYSCALL(ftruncate)
138SYSCALL(fchmod)
139SYSCALL(fchown)
140COMPAT_SYS(getpriority)
141COMPAT_SYS(setpriority)
142SYSCALL(ni_syscall)
143COMPAT_SYS(statfs)
144COMPAT_SYS(fstatfs)
145SYSCALL(ni_syscall)
146COMPAT_SYS(socketcall)
147COMPAT_SYS(syslog)
148COMPAT_SYS(setitimer)
149COMPAT_SYS(getitimer)
150COMPAT_SYS(newstat)
151COMPAT_SYS(newlstat)
152COMPAT_SYS(newfstat)
153SYSX(sys_ni_syscall,sys_uname,sys_uname)
154SYSCALL(ni_syscall)
155SYSCALL(vhangup)
156SYSCALL(ni_syscall)
157SYSCALL(ni_syscall)
158COMPAT_SYS(wait4)
159SYSCALL(swapoff)
160COMPAT_SYS(sysinfo)
161COMPAT_SYS(ipc)
162SYSCALL(fsync)
163SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn)
164PPC_SYS(clone)
165COMPAT_SYS(setdomainname)
166PPC_SYS(newuname)
167SYSCALL(ni_syscall)
168COMPAT_SYS(adjtimex)
169SYSCALL(mprotect)
170SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
171SYSCALL(ni_syscall)
172SYSCALL(init_module)
173SYSCALL(delete_module)
174SYSCALL(ni_syscall)
175SYSCALL(quotactl)
176COMPAT_SYS(getpgid)
177SYSCALL(fchdir)
178SYSCALL(bdflush)
179COMPAT_SYS(sysfs)
180SYSX(ppc64_personality,ppc64_personality,sys_personality)
181SYSCALL(ni_syscall)
182SYSCALL(setfsuid)
183SYSCALL(setfsgid)
184SYSCALL(llseek)
185COMPAT_SYS(getdents)
186SYSX(sys_select,ppc32_select,ppc_select)
187SYSCALL(flock)
188SYSCALL(msync)
189COMPAT_SYS(readv)
190COMPAT_SYS(writev)
191COMPAT_SYS(getsid)
192SYSCALL(fdatasync)
193COMPAT_SYS(sysctl)
194SYSCALL(mlock)
195SYSCALL(munlock)
196SYSCALL(mlockall)
197SYSCALL(munlockall)
198COMPAT_SYS(sched_setparam)
199COMPAT_SYS(sched_getparam)
200COMPAT_SYS(sched_setscheduler)
201COMPAT_SYS(sched_getscheduler)
202SYSCALL(sched_yield)
203COMPAT_SYS(sched_get_priority_max)
204COMPAT_SYS(sched_get_priority_min)
205COMPAT_SYS(sched_rr_get_interval)
206COMPAT_SYS(nanosleep)
207SYSCALL(mremap)
208SYSCALL(setresuid)
209SYSCALL(getresuid)
210SYSCALL(ni_syscall)
211SYSCALL(poll)
212COMPAT_SYS(nfsservctl)
213SYSCALL(setresgid)
214SYSCALL(getresgid)
215COMPAT_SYS(prctl)
216SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn)
217COMPAT_SYS(rt_sigaction)
218COMPAT_SYS(rt_sigprocmask)
219COMPAT_SYS(rt_sigpending)
220COMPAT_SYS(rt_sigtimedwait)
221COMPAT_SYS(rt_sigqueueinfo)
222SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend)
223COMPAT_SYS(pread64)
224COMPAT_SYS(pwrite64)
225SYSCALL(chown)
226SYSCALL(getcwd)
227SYSCALL(capget)
228SYSCALL(capset)
229COMPAT_SYS(sigaltstack)
230SYSX(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
231SYSCALL(ni_syscall)
232SYSCALL(ni_syscall)
233PPC_SYS(vfork)
234COMPAT_SYS(getrlimit)
235COMPAT_SYS(readahead)
236SYS32ONLY(mmap2)
237SYS32ONLY(truncate64)
238SYS32ONLY(ftruncate64)
239SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
240SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
241SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
242COMPAT_SYS(pciconfig_read)
243COMPAT_SYS(pciconfig_write)
244COMPAT_SYS(pciconfig_iobase)
245SYSCALL(ni_syscall)
246SYSCALL(getdents64)
247SYSCALL(pivot_root)
248SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
249SYSCALL(madvise)
250SYSCALL(mincore)
251SYSCALL(gettid)
252SYSCALL(tkill)
253SYSCALL(setxattr)
254SYSCALL(lsetxattr)
255SYSCALL(fsetxattr)
256SYSCALL(getxattr)
257SYSCALL(lgetxattr)
258SYSCALL(fgetxattr)
259SYSCALL(listxattr)
260SYSCALL(llistxattr)
261SYSCALL(flistxattr)
262SYSCALL(removexattr)
263SYSCALL(lremovexattr)
264SYSCALL(fremovexattr)
265COMPAT_SYS(futex)
266COMPAT_SYS(sched_setaffinity)
267COMPAT_SYS(sched_getaffinity)
268SYSCALL(ni_syscall)
269SYSCALL(ni_syscall)
270SYS32ONLY(sendfile64)
271COMPAT_SYS(io_setup)
272SYSCALL(io_destroy)
273COMPAT_SYS(io_getevents)
274COMPAT_SYS(io_submit)
275SYSCALL(io_cancel)
276SYSCALL(set_tid_address)
277SYSX(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
278SYSCALL(exit_group)
279SYSX(sys_lookup_dcookie,ppc32_lookup_dcookie,sys_lookup_dcookie)
280SYSCALL(epoll_create)
281SYSCALL(epoll_ctl)
282SYSCALL(epoll_wait)
283SYSCALL(remap_file_pages)
284SYSX(sys_timer_create,ppc32_timer_create,sys_timer_create)
285COMPAT_SYS(timer_settime)
286COMPAT_SYS(timer_gettime)
287SYSCALL(timer_getoverrun)
288SYSCALL(timer_delete)
289COMPAT_SYS(clock_settime)
290COMPAT_SYS(clock_gettime)
291COMPAT_SYS(clock_getres)
292COMPAT_SYS(clock_nanosleep)
293SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
294COMPAT_SYS(tgkill)
295COMPAT_SYS(utimes)
296COMPAT_SYS(statfs64)
297COMPAT_SYS(fstatfs64)
298SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
299PPC_SYS(rtas)
300OLDSYS(debug_setcontext)
301SYSCALL(ni_syscall)
302SYSCALL(ni_syscall)
303COMPAT_SYS(mbind)
304COMPAT_SYS(get_mempolicy)
305COMPAT_SYS(set_mempolicy)
306COMPAT_SYS(mq_open)
307SYSCALL(mq_unlink)
308COMPAT_SYS(mq_timedsend)
309COMPAT_SYS(mq_timedreceive)
310COMPAT_SYS(mq_notify)
311COMPAT_SYS(mq_getsetattr)
312COMPAT_SYS(kexec_load)
313COMPAT_SYS(add_key)
314COMPAT_SYS(request_key)
315COMPAT_SYS(keyctl)
316COMPAT_SYS(waitid)
317COMPAT_SYS(ioprio_set)
318COMPAT_SYS(ioprio_get)
319SYSCALL(inotify_init)
320SYSCALL(inotify_add_watch)
321SYSCALL(inotify_rm_watch)
diff --git a/arch/ppc64/kernel/time.c b/arch/powerpc/kernel/time.c
index b56c6a324e17..23436b6c1881 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1,5 +1,4 @@
1/* 1/*
2 *
3 * Common time routines among all ppc machines. 2 * Common time routines among all ppc machines.
4 * 3 *
5 * Written by Cort Dougan (cort@cs.nmt.edu) to merge 4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
@@ -44,33 +43,32 @@
44#include <linux/interrupt.h> 43#include <linux/interrupt.h>
45#include <linux/timex.h> 44#include <linux/timex.h>
46#include <linux/kernel_stat.h> 45#include <linux/kernel_stat.h>
47#include <linux/mc146818rtc.h>
48#include <linux/time.h> 46#include <linux/time.h>
49#include <linux/init.h> 47#include <linux/init.h>
50#include <linux/profile.h> 48#include <linux/profile.h>
51#include <linux/cpu.h> 49#include <linux/cpu.h>
52#include <linux/security.h> 50#include <linux/security.h>
51#include <linux/percpu.h>
52#include <linux/rtc.h>
53 53
54#include <asm/io.h> 54#include <asm/io.h>
55#include <asm/processor.h> 55#include <asm/processor.h>
56#include <asm/nvram.h> 56#include <asm/nvram.h>
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/machdep.h> 58#include <asm/machdep.h>
59#ifdef CONFIG_PPC_ISERIES
60#include <asm/iSeries/ItLpQueue.h>
61#include <asm/iSeries/HvCallXm.h>
62#endif
63#include <asm/uaccess.h> 59#include <asm/uaccess.h>
64#include <asm/time.h> 60#include <asm/time.h>
65#include <asm/ppcdebug.h>
66#include <asm/prom.h> 61#include <asm/prom.h>
67#include <asm/sections.h> 62#include <asm/irq.h>
63#include <asm/div64.h>
64#ifdef CONFIG_PPC64
68#include <asm/systemcfg.h> 65#include <asm/systemcfg.h>
69#include <asm/firmware.h> 66#include <asm/firmware.h>
70 67#endif
71u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; 68#ifdef CONFIG_PPC_ISERIES
72 69#include <asm/iSeries/ItLpQueue.h>
73EXPORT_SYMBOL(jiffies_64); 70#include <asm/iSeries/HvCallXm.h>
71#endif
74 72
75/* keep track of when we need to update the rtc */ 73/* keep track of when we need to update the rtc */
76time_t last_rtc_update; 74time_t last_rtc_update;
@@ -81,27 +79,37 @@ unsigned long iSeries_recal_tb = 0;
81static unsigned long first_settimeofday = 1; 79static unsigned long first_settimeofday = 1;
82#endif 80#endif
83 81
82/* The decrementer counts down by 128 every 128ns on a 601. */
83#define DECREMENTER_COUNT_601 (1000000000 / HZ)
84
84#define XSEC_PER_SEC (1024*1024) 85#define XSEC_PER_SEC (1024*1024)
85 86
87#ifdef CONFIG_PPC64
88#define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
89#else
90/* compute ((xsec << 12) * max) >> 32 */
91#define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
92#endif
93
86unsigned long tb_ticks_per_jiffy; 94unsigned long tb_ticks_per_jiffy;
87unsigned long tb_ticks_per_usec = 100; /* sane default */ 95unsigned long tb_ticks_per_usec = 100; /* sane default */
88EXPORT_SYMBOL(tb_ticks_per_usec); 96EXPORT_SYMBOL(tb_ticks_per_usec);
89unsigned long tb_ticks_per_sec; 97unsigned long tb_ticks_per_sec;
90unsigned long tb_to_xs; 98u64 tb_to_xs;
91unsigned tb_to_us; 99unsigned tb_to_us;
92unsigned long processor_freq; 100unsigned long processor_freq;
93DEFINE_SPINLOCK(rtc_lock); 101DEFINE_SPINLOCK(rtc_lock);
94EXPORT_SYMBOL_GPL(rtc_lock); 102EXPORT_SYMBOL_GPL(rtc_lock);
95 103
96unsigned long tb_to_ns_scale; 104u64 tb_to_ns_scale;
97unsigned long tb_to_ns_shift; 105unsigned tb_to_ns_shift;
98 106
99struct gettimeofday_struct do_gtod; 107struct gettimeofday_struct do_gtod;
100 108
101extern unsigned long wall_jiffies; 109extern unsigned long wall_jiffies;
102extern int smp_tb_synchronized;
103 110
104extern struct timezone sys_tz; 111extern struct timezone sys_tz;
112static long timezone_offset;
105 113
106void ppc_adjtimex(void); 114void ppc_adjtimex(void);
107 115
@@ -110,6 +118,20 @@ static unsigned adjusting_time = 0;
110unsigned long ppc_proc_freq; 118unsigned long ppc_proc_freq;
111unsigned long ppc_tb_freq; 119unsigned long ppc_tb_freq;
112 120
121#ifdef CONFIG_PPC32 /* XXX for now */
122#define boot_cpuid 0
123#endif
124
125u64 tb_last_jiffy __cacheline_aligned_in_smp;
126unsigned long tb_last_stamp;
127
128/*
129 * Note that on ppc32 this only stores the bottom 32 bits of
130 * the timebase value, but that's enough to tell when a jiffy
131 * has passed.
132 */
133DEFINE_PER_CPU(unsigned long, last_jiffy);
134
113static __inline__ void timer_check_rtc(void) 135static __inline__ void timer_check_rtc(void)
114{ 136{
115 /* 137 /*
@@ -128,31 +150,31 @@ static __inline__ void timer_check_rtc(void)
128 * We should have an rtc call that only sets the minutes and 150 * We should have an rtc call that only sets the minutes and
129 * seconds like on Intel to avoid problems with non UTC clocks. 151 * seconds like on Intel to avoid problems with non UTC clocks.
130 */ 152 */
131 if (ntp_synced() && 153 if (ppc_md.set_rtc_time && ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 && 154 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 155 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) { 156 jiffies - wall_jiffies == 1) {
135 struct rtc_time tm; 157 struct rtc_time tm;
136 to_tm(xtime.tv_sec+1, &tm); 158 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
137 tm.tm_year -= 1900; 159 tm.tm_year -= 1900;
138 tm.tm_mon -= 1; 160 tm.tm_mon -= 1;
139 if (ppc_md.set_rtc_time(&tm) == 0) 161 if (ppc_md.set_rtc_time(&tm) == 0)
140 last_rtc_update = xtime.tv_sec+1; 162 last_rtc_update = xtime.tv_sec + 1;
141 else 163 else
142 /* Try again one minute later */ 164 /* Try again one minute later */
143 last_rtc_update += 60; 165 last_rtc_update += 60;
144 } 166 }
145} 167}
146 168
147/* 169/*
148 * This version of gettimeofday has microsecond resolution. 170 * This version of gettimeofday has microsecond resolution.
149 */ 171 */
150static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val) 172static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
151{ 173{
152 unsigned long sec, usec, tb_ticks; 174 unsigned long sec, usec;
153 unsigned long xsec, tb_xsec; 175 u64 tb_ticks, xsec;
154 struct gettimeofday_vars * temp_varp; 176 struct gettimeofday_vars *temp_varp;
155 unsigned long temp_tb_to_xs, temp_stamp_xsec; 177 u64 temp_tb_to_xs, temp_stamp_xsec;
156 178
157 /* 179 /*
158 * These calculations are faster (gets rid of divides) 180 * These calculations are faster (gets rid of divides)
@@ -164,11 +186,10 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
164 tb_ticks = tb_val - temp_varp->tb_orig_stamp; 186 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
165 temp_tb_to_xs = temp_varp->tb_to_xs; 187 temp_tb_to_xs = temp_varp->tb_to_xs;
166 temp_stamp_xsec = temp_varp->stamp_xsec; 188 temp_stamp_xsec = temp_varp->stamp_xsec;
167 tb_xsec = mulhdu( tb_ticks, temp_tb_to_xs ); 189 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
168 xsec = temp_stamp_xsec + tb_xsec;
169 sec = xsec / XSEC_PER_SEC; 190 sec = xsec / XSEC_PER_SEC;
170 xsec -= sec * XSEC_PER_SEC; 191 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
171 usec = (xsec * USEC_PER_SEC)/XSEC_PER_SEC; 192 usec = SCALE_XSEC(usec, 1000000);
172 193
173 tv->tv_sec = sec; 194 tv->tv_sec = sec;
174 tv->tv_usec = usec; 195 tv->tv_usec = usec;
@@ -176,6 +197,26 @@ static inline void __do_gettimeofday(struct timeval *tv, unsigned long tb_val)
176 197
177void do_gettimeofday(struct timeval *tv) 198void do_gettimeofday(struct timeval *tv)
178{ 199{
200 if (__USE_RTC()) {
201 /* do this the old way */
202 unsigned long flags, seq;
203 unsigned int sec, nsec, usec, lost;
204
205 do {
206 seq = read_seqbegin_irqsave(&xtime_lock, flags);
207 sec = xtime.tv_sec;
208 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
209 lost = jiffies - wall_jiffies;
210 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
211 usec = nsec / 1000 + lost * (1000000 / HZ);
212 while (usec >= 1000000) {
213 usec -= 1000000;
214 ++sec;
215 }
216 tv->tv_sec = sec;
217 tv->tv_usec = usec;
218 return;
219 }
179 __do_gettimeofday(tv, get_tb()); 220 __do_gettimeofday(tv, get_tb());
180} 221}
181 222
@@ -185,6 +226,8 @@ EXPORT_SYMBOL(do_gettimeofday);
185 226
186static inline void timer_sync_xtime(unsigned long cur_tb) 227static inline void timer_sync_xtime(unsigned long cur_tb)
187{ 228{
229#ifdef CONFIG_PPC64
230 /* why do we do this? */
188 struct timeval my_tv; 231 struct timeval my_tv;
189 232
190 __do_gettimeofday(&my_tv, cur_tb); 233 __do_gettimeofday(&my_tv, cur_tb);
@@ -193,47 +236,76 @@ static inline void timer_sync_xtime(unsigned long cur_tb)
193 xtime.tv_sec = my_tv.tv_sec; 236 xtime.tv_sec = my_tv.tv_sec;
194 xtime.tv_nsec = my_tv.tv_usec * 1000; 237 xtime.tv_nsec = my_tv.tv_usec * 1000;
195 } 238 }
239#endif
196} 240}
197 241
198/* 242/*
199 * When the timebase - tb_orig_stamp gets too big, we do a manipulation 243 * There are two copies of tb_to_xs and stamp_xsec so that no
200 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the 244 * lock is needed to access and use these values in
201 * difference tb - tb_orig_stamp small enough to always fit inside a 245 * do_gettimeofday. We alternate the copies and as long as a
202 * 32 bits number. This is a requirement of our fast 32 bits userland 246 * reasonable time elapses between changes, there will never
203 * implementation in the vdso. If we "miss" a call to this function 247 * be inconsistent values. ntpd has a minimum of one minute
204 * (interrupt latency, CPU locked in a spinlock, ...) and we end up 248 * between updates.
205 * with a too big difference, then the vdso will fallback to calling
206 * the syscall
207 */ 249 */
208static __inline__ void timer_recalc_offset(unsigned long cur_tb) 250static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
251 u64 new_tb_to_xs)
209{ 252{
210 struct gettimeofday_vars * temp_varp;
211 unsigned temp_idx; 253 unsigned temp_idx;
212 unsigned long offset, new_stamp_xsec, new_tb_orig_stamp; 254 struct gettimeofday_vars *temp_varp;
213
214 if (((cur_tb - do_gtod.varp->tb_orig_stamp) & 0x80000000u) == 0)
215 return;
216 255
217 temp_idx = (do_gtod.var_idx == 0); 256 temp_idx = (do_gtod.var_idx == 0);
218 temp_varp = &do_gtod.vars[temp_idx]; 257 temp_varp = &do_gtod.vars[temp_idx];
219 258
220 new_tb_orig_stamp = cur_tb; 259 temp_varp->tb_to_xs = new_tb_to_xs;
221 offset = new_tb_orig_stamp - do_gtod.varp->tb_orig_stamp; 260 temp_varp->tb_orig_stamp = new_tb_stamp;
222 new_stamp_xsec = do_gtod.varp->stamp_xsec + mulhdu(offset, do_gtod.varp->tb_to_xs);
223
224 temp_varp->tb_to_xs = do_gtod.varp->tb_to_xs;
225 temp_varp->tb_orig_stamp = new_tb_orig_stamp;
226 temp_varp->stamp_xsec = new_stamp_xsec; 261 temp_varp->stamp_xsec = new_stamp_xsec;
227 smp_mb(); 262 smp_mb();
228 do_gtod.varp = temp_varp; 263 do_gtod.varp = temp_varp;
229 do_gtod.var_idx = temp_idx; 264 do_gtod.var_idx = temp_idx;
230 265
266#ifdef CONFIG_PPC64
267 /*
268 * tb_update_count is used to allow the userspace gettimeofday code
269 * to assure itself that it sees a consistent view of the tb_to_xs and
270 * stamp_xsec variables. It reads the tb_update_count, then reads
271 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
272 * the two values of tb_update_count match and are even then the
273 * tb_to_xs and stamp_xsec values are consistent. If not, then it
274 * loops back and reads them again until this criteria is met.
275 */
231 ++(systemcfg->tb_update_count); 276 ++(systemcfg->tb_update_count);
232 smp_wmb(); 277 smp_wmb();
233 systemcfg->tb_orig_stamp = new_tb_orig_stamp; 278 systemcfg->tb_orig_stamp = new_tb_stamp;
234 systemcfg->stamp_xsec = new_stamp_xsec; 279 systemcfg->stamp_xsec = new_stamp_xsec;
280 systemcfg->tb_to_xs = new_tb_to_xs;
235 smp_wmb(); 281 smp_wmb();
236 ++(systemcfg->tb_update_count); 282 ++(systemcfg->tb_update_count);
283#endif
284}
285
286/*
287 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
288 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
289 * difference tb - tb_orig_stamp small enough to always fit inside a
290 * 32 bits number. This is a requirement of our fast 32 bits userland
291 * implementation in the vdso. If we "miss" a call to this function
292 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
293 * with a too big difference, then the vdso will fallback to calling
294 * the syscall
295 */
296static __inline__ void timer_recalc_offset(u64 cur_tb)
297{
298 unsigned long offset;
299 u64 new_stamp_xsec;
300
301 if (__USE_RTC())
302 return;
303 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
304 if ((offset & 0x80000000u) == 0)
305 return;
306 new_stamp_xsec = do_gtod.varp->stamp_xsec
307 + mulhdu(offset, do_gtod.varp->tb_to_xs);
308 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
237} 309}
238 310
239#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
@@ -313,26 +385,37 @@ static void iSeries_tb_recal(void)
313 * call will not be needed) 385 * call will not be needed)
314 */ 386 */
315 387
316unsigned long tb_last_stamp __cacheline_aligned_in_smp;
317
318/* 388/*
319 * timer_interrupt - gets called when the decrementer overflows, 389 * timer_interrupt - gets called when the decrementer overflows,
320 * with interrupts disabled. 390 * with interrupts disabled.
321 */ 391 */
322int timer_interrupt(struct pt_regs * regs) 392void timer_interrupt(struct pt_regs * regs)
323{ 393{
324 int next_dec; 394 int next_dec;
325 unsigned long cur_tb; 395 int cpu = smp_processor_id();
326 struct paca_struct *lpaca = get_paca(); 396 unsigned long ticks;
327 unsigned long cpu = smp_processor_id(); 397
398#ifdef CONFIG_PPC32
399 if (atomic_read(&ppc_n_lost_interrupts) != 0)
400 do_IRQ(regs);
401#endif
328 402
329 irq_enter(); 403 irq_enter();
330 404
331 profile_tick(CPU_PROFILING, regs); 405 profile_tick(CPU_PROFILING, regs);
332 406
333 lpaca->lppaca.int_dword.fields.decr_int = 0; 407#ifdef CONFIG_PPC_ISERIES
408 get_paca()->lppaca.int_dword.fields.decr_int = 0;
409#endif
410
411 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
412 >= tb_ticks_per_jiffy) {
413 /* Update last_jiffy */
414 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
415 /* Handle RTCL overflow on 601 */
416 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
417 per_cpu(last_jiffy, cpu) -= 1000000000;
334 418
335 while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {
336 /* 419 /*
337 * We cannot disable the decrementer, so in the period 420 * We cannot disable the decrementer, so in the period
338 * between this cpu's being marked offline in cpu_online_map 421 * between this cpu's being marked offline in cpu_online_map
@@ -342,27 +425,27 @@ int timer_interrupt(struct pt_regs * regs)
342 */ 425 */
343 if (!cpu_is_offline(cpu)) 426 if (!cpu_is_offline(cpu))
344 update_process_times(user_mode(regs)); 427 update_process_times(user_mode(regs));
428
345 /* 429 /*
346 * No need to check whether cpu is offline here; boot_cpuid 430 * No need to check whether cpu is offline here; boot_cpuid
347 * should have been fixed up by now. 431 * should have been fixed up by now.
348 */ 432 */
349 if (cpu == boot_cpuid) { 433 if (cpu != boot_cpuid)
350 write_seqlock(&xtime_lock); 434 continue;
351 tb_last_stamp = lpaca->next_jiffy_update_tb; 435
352 timer_recalc_offset(lpaca->next_jiffy_update_tb); 436 write_seqlock(&xtime_lock);
353 do_timer(regs); 437 tb_last_jiffy += tb_ticks_per_jiffy;
354 timer_sync_xtime(lpaca->next_jiffy_update_tb); 438 tb_last_stamp = per_cpu(last_jiffy, cpu);
355 timer_check_rtc(); 439 timer_recalc_offset(tb_last_jiffy);
356 write_sequnlock(&xtime_lock); 440 do_timer(regs);
357 if ( adjusting_time && (time_adjust == 0) ) 441 timer_sync_xtime(tb_last_jiffy);
358 ppc_adjtimex(); 442 timer_check_rtc();
359 } 443 write_sequnlock(&xtime_lock);
360 lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy; 444 if (adjusting_time && (time_adjust == 0))
445 ppc_adjtimex();
361 } 446 }
362 447
363 next_dec = lpaca->next_jiffy_update_tb - cur_tb; 448 next_dec = tb_ticks_per_jiffy - ticks;
364 if (next_dec > lpaca->default_decr)
365 next_dec = lpaca->default_decr;
366 set_dec(next_dec); 449 set_dec(next_dec);
367 450
368#ifdef CONFIG_PPC_ISERIES 451#ifdef CONFIG_PPC_ISERIES
@@ -370,17 +453,47 @@ int timer_interrupt(struct pt_regs * regs)
370 process_hvlpevents(regs); 453 process_hvlpevents(regs);
371#endif 454#endif
372 455
456#ifdef CONFIG_PPC64
373 /* collect purr register values often, for accurate calculations */ 457 /* collect purr register values often, for accurate calculations */
374 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 458 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
375 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 459 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
376 cu->current_tb = mfspr(SPRN_PURR); 460 cu->current_tb = mfspr(SPRN_PURR);
377 } 461 }
462#endif
378 463
379 irq_exit(); 464 irq_exit();
465}
466
467void wakeup_decrementer(void)
468{
469 int i;
380 470
381 return 1; 471 set_dec(tb_ticks_per_jiffy);
472 /*
473 * We don't expect this to be called on a machine with a 601,
474 * so using get_tbl is fine.
475 */
476 tb_last_stamp = tb_last_jiffy = get_tb();
477 for_each_cpu(i)
478 per_cpu(last_jiffy, i) = tb_last_stamp;
382} 479}
383 480
481#ifdef CONFIG_SMP
482void __init smp_space_timers(unsigned int max_cpus)
483{
484 int i;
485 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
486 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
487
488 for_each_cpu(i) {
489 if (i != boot_cpuid) {
490 previous_tb += offset;
491 per_cpu(last_jiffy, i) = previous_tb;
492 }
493 }
494}
495#endif
496
384/* 497/*
385 * Scheduler clock - returns current time in nanosec units. 498 * Scheduler clock - returns current time in nanosec units.
386 * 499 *
@@ -390,6 +503,8 @@ int timer_interrupt(struct pt_regs * regs)
390 */ 503 */
391unsigned long long sched_clock(void) 504unsigned long long sched_clock(void)
392{ 505{
506 if (__USE_RTC())
507 return get_rtc();
393 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift; 508 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
394} 509}
395 510
@@ -398,31 +513,31 @@ int do_settimeofday(struct timespec *tv)
398 time_t wtm_sec, new_sec = tv->tv_sec; 513 time_t wtm_sec, new_sec = tv->tv_sec;
399 long wtm_nsec, new_nsec = tv->tv_nsec; 514 long wtm_nsec, new_nsec = tv->tv_nsec;
400 unsigned long flags; 515 unsigned long flags;
401 unsigned long delta_xsec;
402 long int tb_delta; 516 long int tb_delta;
403 unsigned long new_xsec; 517 u64 new_xsec, tb_delta_xs;
404 518
405 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 519 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
406 return -EINVAL; 520 return -EINVAL;
407 521
408 write_seqlock_irqsave(&xtime_lock, flags); 522 write_seqlock_irqsave(&xtime_lock, flags);
409 /* Updating the RTC is not the job of this code. If the time is 523
410 * stepped under NTP, the RTC will be update after STA_UNSYNC 524 /*
411 * is cleared. Tool like clock/hwclock either copy the RTC 525 * Updating the RTC is not the job of this code. If the time is
526 * stepped under NTP, the RTC will be updated after STA_UNSYNC
527 * is cleared. Tools like clock/hwclock either copy the RTC
412 * to the system time, in which case there is no point in writing 528 * to the system time, in which case there is no point in writing
413 * to the RTC again, or write to the RTC but then they don't call 529 * to the RTC again, or write to the RTC but then they don't call
414 * settimeofday to perform this operation. 530 * settimeofday to perform this operation.
415 */ 531 */
416#ifdef CONFIG_PPC_ISERIES 532#ifdef CONFIG_PPC_ISERIES
417 if ( first_settimeofday ) { 533 if (first_settimeofday) {
418 iSeries_tb_recal(); 534 iSeries_tb_recal();
419 first_settimeofday = 0; 535 first_settimeofday = 0;
420 } 536 }
421#endif 537#endif
422 tb_delta = tb_ticks_since(tb_last_stamp); 538 tb_delta = tb_ticks_since(tb_last_stamp);
423 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; 539 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
424 540 tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
425 new_nsec -= tb_delta / tb_ticks_per_usec / 1000;
426 541
427 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); 542 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
428 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); 543 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@@ -437,28 +552,18 @@ int do_settimeofday(struct timespec *tv)
437 552
438 ntp_clear(); 553 ntp_clear();
439 554
440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 555 new_xsec = 0;
441 do_gtod.varp->tb_to_xs ); 556 if (new_nsec != 0) {
442 557 new_xsec = (u64)new_nsec * XSEC_PER_SEC;
443 new_xsec = (new_nsec * XSEC_PER_SEC) / NSEC_PER_SEC; 558 do_div(new_xsec, NSEC_PER_SEC);
444 new_xsec += new_sec * XSEC_PER_SEC;
445 if ( new_xsec > delta_xsec ) {
446 do_gtod.varp->stamp_xsec = new_xsec - delta_xsec;
447 systemcfg->stamp_xsec = new_xsec - delta_xsec;
448 }
449 else {
450 /* This is only for the case where the user is setting the time
451 * way back to a time such that the boot time would have been
452 * before 1970 ... eg. we booted ten days ago, and we are setting
453 * the time to Jan 5, 1970 */
454 do_gtod.varp->stamp_xsec = new_xsec;
455 do_gtod.varp->tb_orig_stamp = tb_last_stamp;
456 systemcfg->stamp_xsec = new_xsec;
457 systemcfg->tb_orig_stamp = tb_last_stamp;
458 } 559 }
560 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
561 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
459 562
563#ifdef CONFIG_PPC64
460 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest; 564 systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
461 systemcfg->tz_dsttime = sys_tz.tz_dsttime; 565 systemcfg->tz_dsttime = sys_tz.tz_dsttime;
566#endif
462 567
463 write_sequnlock_irqrestore(&xtime_lock, flags); 568 write_sequnlock_irqrestore(&xtime_lock, flags);
464 clock_was_set(); 569 clock_was_set();
@@ -467,11 +572,9 @@ int do_settimeofday(struct timespec *tv)
467 572
468EXPORT_SYMBOL(do_settimeofday); 573EXPORT_SYMBOL(do_settimeofday);
469 574
470#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA)
471void __init generic_calibrate_decr(void) 575void __init generic_calibrate_decr(void)
472{ 576{
473 struct device_node *cpu; 577 struct device_node *cpu;
474 struct div_result divres;
475 unsigned int *fp; 578 unsigned int *fp;
476 int node_found; 579 int node_found;
477 580
@@ -505,37 +608,74 @@ void __init generic_calibrate_decr(void)
505 ppc_proc_freq = *fp; 608 ppc_proc_freq = *fp;
506 } 609 }
507 } 610 }
611#ifdef CONFIG_BOOKE
612 /* Set the time base to zero */
613 mtspr(SPRN_TBWL, 0);
614 mtspr(SPRN_TBWU, 0);
615
616 /* Clear any pending timer interrupts */
617 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
618
619 /* Enable decrementer interrupt */
620 mtspr(SPRN_TCR, TCR_DIE);
621#endif
508 if (!node_found) 622 if (!node_found)
509 printk(KERN_ERR "WARNING: Estimating processor frequency " 623 printk(KERN_ERR "WARNING: Estimating processor frequency "
510 "(not found)\n"); 624 "(not found)\n");
511 625
512 of_node_put(cpu); 626 of_node_put(cpu);
627}
513 628
514 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", 629unsigned long get_boot_time(void)
515 ppc_tb_freq/1000000, ppc_tb_freq%1000000); 630{
516 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", 631 struct rtc_time tm;
517 ppc_proc_freq/1000000, ppc_proc_freq%1000000);
518
519 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
520 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
521 tb_ticks_per_usec = ppc_tb_freq / 1000000;
522 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
523 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres);
524 tb_to_xs = divres.result_low;
525 632
526 setup_default_decr(); 633 if (ppc_md.get_boot_time)
634 return ppc_md.get_boot_time();
635 if (!ppc_md.get_rtc_time)
636 return 0;
637 ppc_md.get_rtc_time(&tm);
638 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
639 tm.tm_hour, tm.tm_min, tm.tm_sec);
527} 640}
528#endif
529 641
642/* This function is only called on the boot processor */
530void __init time_init(void) 643void __init time_init(void)
531{ 644{
532 /* This function is only called on the boot processor */
533 unsigned long flags; 645 unsigned long flags;
534 struct rtc_time tm; 646 unsigned long tm = 0;
535 struct div_result res; 647 struct div_result res;
536 unsigned long scale, shift; 648 u64 scale;
649 unsigned shift;
650
651 if (ppc_md.time_init != NULL)
652 timezone_offset = ppc_md.time_init();
653
654 if (__USE_RTC()) {
655 /* 601 processor: dec counts down by 128 every 128ns */
656 ppc_tb_freq = 1000000000;
657 tb_last_stamp = get_rtcl();
658 tb_last_jiffy = tb_last_stamp;
659 } else {
660 /* Normal PowerPC with timebase register */
661 ppc_md.calibrate_decr();
662 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
663 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
664 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
665 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
666 tb_last_stamp = tb_last_jiffy = get_tb();
667 }
668
669 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
670 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
671 tb_ticks_per_usec = ppc_tb_freq / 1000000;
672 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
673 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
674 tb_to_xs = res.result_low;
537 675
538 ppc_md.calibrate_decr(); 676#ifdef CONFIG_PPC64
677 get_paca()->default_decr = tb_ticks_per_jiffy;
678#endif
539 679
540 /* 680 /*
541 * Compute scale factor for sched_clock. 681 * Compute scale factor for sched_clock.
@@ -559,29 +699,36 @@ void __init time_init(void)
559#ifdef CONFIG_PPC_ISERIES 699#ifdef CONFIG_PPC_ISERIES
560 if (!piranha_simulator) 700 if (!piranha_simulator)
561#endif 701#endif
562 ppc_md.get_boot_time(&tm); 702 tm = get_boot_time();
563 703
564 write_seqlock_irqsave(&xtime_lock, flags); 704 write_seqlock_irqsave(&xtime_lock, flags);
565 xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, 705 xtime.tv_sec = tm;
566 tm.tm_hour, tm.tm_min, tm.tm_sec); 706 xtime.tv_nsec = 0;
567 tb_last_stamp = get_tb();
568 do_gtod.varp = &do_gtod.vars[0]; 707 do_gtod.varp = &do_gtod.vars[0];
569 do_gtod.var_idx = 0; 708 do_gtod.var_idx = 0;
570 do_gtod.varp->tb_orig_stamp = tb_last_stamp; 709 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
571 get_paca()->next_jiffy_update_tb = tb_last_stamp + tb_ticks_per_jiffy; 710 __get_cpu_var(last_jiffy) = tb_last_stamp;
572 do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 711 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
573 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; 712 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
574 do_gtod.varp->tb_to_xs = tb_to_xs; 713 do_gtod.varp->tb_to_xs = tb_to_xs;
575 do_gtod.tb_to_us = tb_to_us; 714 do_gtod.tb_to_us = tb_to_us;
576 systemcfg->tb_orig_stamp = tb_last_stamp; 715#ifdef CONFIG_PPC64
716 systemcfg->tb_orig_stamp = tb_last_jiffy;
577 systemcfg->tb_update_count = 0; 717 systemcfg->tb_update_count = 0;
578 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec; 718 systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
579 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; 719 systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
580 systemcfg->tb_to_xs = tb_to_xs; 720 systemcfg->tb_to_xs = tb_to_xs;
721#endif
581 722
582 time_freq = 0; 723 time_freq = 0;
583 724
584 xtime.tv_nsec = 0; 725 /* If platform provided a timezone (pmac), we correct the time */
726 if (timezone_offset) {
727 sys_tz.tz_minuteswest = -timezone_offset / 60;
728 sys_tz.tz_dsttime = 0;
729 xtime.tv_sec -= timezone_offset;
730 }
731
585 last_rtc_update = xtime.tv_sec; 732 last_rtc_update = xtime.tv_sec;
586 set_normalized_timespec(&wall_to_monotonic, 733 set_normalized_timespec(&wall_to_monotonic,
587 -xtime.tv_sec, -xtime.tv_nsec); 734 -xtime.tv_sec, -xtime.tv_nsec);
@@ -604,25 +751,28 @@ void __init time_init(void)
604 751
605void ppc_adjtimex(void) 752void ppc_adjtimex(void)
606{ 753{
607 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, new_tb_to_xs, new_xsec, new_stamp_xsec; 754#ifdef CONFIG_PPC64
755 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
756 new_tb_to_xs, new_xsec, new_stamp_xsec;
608 unsigned long tb_ticks_per_sec_delta; 757 unsigned long tb_ticks_per_sec_delta;
609 long delta_freq, ltemp; 758 long delta_freq, ltemp;
610 struct div_result divres; 759 struct div_result divres;
611 unsigned long flags; 760 unsigned long flags;
612 struct gettimeofday_vars * temp_varp;
613 unsigned temp_idx;
614 long singleshot_ppm = 0; 761 long singleshot_ppm = 0;
615 762
616 /* Compute parts per million frequency adjustment to accomplish the time adjustment 763 /*
617 implied by time_offset to be applied over the elapsed time indicated by time_constant. 764 * Compute parts per million frequency adjustment to
618 Use SHIFT_USEC to get it into the same units as time_freq. */ 765 * accomplish the time adjustment implied by time_offset to be
766 * applied over the elapsed time indicated by time_constant.
767 * Use SHIFT_USEC to get it into the same units as
768 * time_freq.
769 */
619 if ( time_offset < 0 ) { 770 if ( time_offset < 0 ) {
620 ltemp = -time_offset; 771 ltemp = -time_offset;
621 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 772 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
622 ltemp >>= SHIFT_KG + time_constant; 773 ltemp >>= SHIFT_KG + time_constant;
623 ltemp = -ltemp; 774 ltemp = -ltemp;
624 } 775 } else {
625 else {
626 ltemp = time_offset; 776 ltemp = time_offset;
627 ltemp <<= SHIFT_USEC - SHIFT_UPDATE; 777 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
628 ltemp >>= SHIFT_KG + time_constant; 778 ltemp >>= SHIFT_KG + time_constant;
@@ -639,7 +789,10 @@ void ppc_adjtimex(void)
639 789
640 adjusting_time = 1; 790 adjusting_time = 1;
641 791
642 /* Compute parts per million frequency adjustment to match time_adjust */ 792 /*
793 * Compute parts per million frequency adjustment
794 * to match time_adjust
795 */
643 singleshot_ppm = tickadj * HZ; 796 singleshot_ppm = tickadj * HZ;
644 /* 797 /*
645 * The adjustment should be tickadj*HZ to match the code in 798 * The adjustment should be tickadj*HZ to match the code in
@@ -647,7 +800,7 @@ void ppc_adjtimex(void)
647 * large. 3/4 of tickadj*HZ seems about right 800 * large. 3/4 of tickadj*HZ seems about right
648 */ 801 */
649 singleshot_ppm -= singleshot_ppm / 4; 802 singleshot_ppm -= singleshot_ppm / 4;
650 /* Use SHIFT_USEC to get it into the same units as time_freq */ 803 /* Use SHIFT_USEC to get it into the same units as time_freq */
651 singleshot_ppm <<= SHIFT_USEC; 804 singleshot_ppm <<= SHIFT_USEC;
652 if ( time_adjust < 0 ) 805 if ( time_adjust < 0 )
653 singleshot_ppm = -singleshot_ppm; 806 singleshot_ppm = -singleshot_ppm;
@@ -663,7 +816,10 @@ void ppc_adjtimex(void)
663 /* Add up all of the frequency adjustments */ 816 /* Add up all of the frequency adjustments */
664 delta_freq = time_freq + ltemp + singleshot_ppm; 817 delta_freq = time_freq + ltemp + singleshot_ppm;
665 818
666 /* Compute a new value for tb_ticks_per_sec based on the frequency adjustment */ 819 /*
820 * Compute a new value for tb_ticks_per_sec based on
821 * the frequency adjustment
822 */
667 den = 1000000 * (1 << (SHIFT_USEC - 8)); 823 den = 1000000 * (1 << (SHIFT_USEC - 8));
668 if ( delta_freq < 0 ) { 824 if ( delta_freq < 0 ) {
669 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; 825 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
@@ -678,61 +834,37 @@ void ppc_adjtimex(void)
678 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); 834 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
679 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); 835 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
680#endif 836#endif
681 837
682 /* Compute a new value of tb_to_xs (used to convert tb to microseconds and a new value of 838 /*
683 stamp_xsec which is the time (in 1/2^20 second units) corresponding to tb_orig_stamp. This 839 * Compute a new value of tb_to_xs (used to convert tb to
684 new value of stamp_xsec compensates for the change in frequency (implied by the new tb_to_xs) 840 * microseconds) and a new value of stamp_xsec which is the
685 which guarantees that the current time remains the same */ 841 * time (in 1/2^20 second units) corresponding to
842 * tb_orig_stamp. This new value of stamp_xsec compensates
843 * for the change in frequency (implied by the new tb_to_xs)
844 * which guarantees that the current time remains the same.
845 */
686 write_seqlock_irqsave( &xtime_lock, flags ); 846 write_seqlock_irqsave( &xtime_lock, flags );
687 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; 847 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
688 div128_by_32( 1024*1024, 0, new_tb_ticks_per_sec, &divres ); 848 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
689 new_tb_to_xs = divres.result_low; 849 new_tb_to_xs = divres.result_low;
690 new_xsec = mulhdu( tb_ticks, new_tb_to_xs ); 850 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
691 851
692 old_xsec = mulhdu( tb_ticks, do_gtod.varp->tb_to_xs ); 852 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
693 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; 853 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
694 854
695 /* There are two copies of tb_to_xs and stamp_xsec so that no lock is needed to access and use these 855 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
696 values in do_gettimeofday. We alternate the copies and as long as a reasonable time elapses between
697 changes, there will never be inconsistent values. ntpd has a minimum of one minute between updates */
698
699 temp_idx = (do_gtod.var_idx == 0);
700 temp_varp = &do_gtod.vars[temp_idx];
701
702 temp_varp->tb_to_xs = new_tb_to_xs;
703 temp_varp->stamp_xsec = new_stamp_xsec;
704 temp_varp->tb_orig_stamp = do_gtod.varp->tb_orig_stamp;
705 smp_mb();
706 do_gtod.varp = temp_varp;
707 do_gtod.var_idx = temp_idx;
708
709 /*
710 * tb_update_count is used to allow the problem state gettimeofday code
711 * to assure itself that it sees a consistent view of the tb_to_xs and
712 * stamp_xsec variables. It reads the tb_update_count, then reads
713 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
714 * the two values of tb_update_count match and are even then the
715 * tb_to_xs and stamp_xsec values are consistent. If not, then it
716 * loops back and reads them again until this criteria is met.
717 */
718 ++(systemcfg->tb_update_count);
719 smp_wmb();
720 systemcfg->tb_to_xs = new_tb_to_xs;
721 systemcfg->stamp_xsec = new_stamp_xsec;
722 smp_wmb();
723 ++(systemcfg->tb_update_count);
724 856
725 write_sequnlock_irqrestore( &xtime_lock, flags ); 857 write_sequnlock_irqrestore( &xtime_lock, flags );
726 858#endif /* CONFIG_PPC64 */
727} 859}
728 860
729 861
730#define TICK_SIZE tick
731#define FEBRUARY 2 862#define FEBRUARY 2
732#define STARTOFTIME 1970 863#define STARTOFTIME 1970
733#define SECDAY 86400L 864#define SECDAY 86400L
734#define SECYR (SECDAY * 365) 865#define SECYR (SECDAY * 365)
735#define leapyear(year) ((year) % 4 == 0) 866#define leapyear(year) ((year) % 4 == 0 && \
867 ((year) % 100 != 0 || (year) % 400 == 0))
736#define days_in_year(a) (leapyear(a) ? 366 : 365) 868#define days_in_year(a) (leapyear(a) ? 366 : 365)
737#define days_in_month(a) (month_days[(a) - 1]) 869#define days_in_month(a) (month_days[(a) - 1])
738 870
@@ -750,37 +882,25 @@ void GregorianDay(struct rtc_time * tm)
750 int day; 882 int day;
751 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 }; 883 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
752 884
753 lastYear=tm->tm_year-1; 885 lastYear = tm->tm_year - 1;
754 886
755 /* 887 /*
756 * Number of leap corrections to apply up to end of last year 888 * Number of leap corrections to apply up to end of last year
757 */ 889 */
758 leapsToDate = lastYear/4 - lastYear/100 + lastYear/400; 890 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
759 891
760 /* 892 /*
761 * This year is a leap year if it is divisible by 4 except when it is 893 * This year is a leap year if it is divisible by 4 except when it is
762 * divisible by 100 unless it is divisible by 400 894 * divisible by 100 unless it is divisible by 400
763 * 895 *
764 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 will be 896 * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
765 */ 897 */
766 if((tm->tm_year%4==0) && 898 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
767 ((tm->tm_year%100!=0) || (tm->tm_year%400==0)) &&
768 (tm->tm_mon>2))
769 {
770 /*
771 * We are past Feb. 29 in a leap year
772 */
773 day=1;
774 }
775 else
776 {
777 day=0;
778 }
779 899
780 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] + 900 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
781 tm->tm_mday; 901 tm->tm_mday;
782 902
783 tm->tm_wday=day%7; 903 tm->tm_wday = day % 7;
784} 904}
785 905
786void to_tm(int tim, struct rtc_time * tm) 906void to_tm(int tim, struct rtc_time * tm)
@@ -826,14 +946,16 @@ void to_tm(int tim, struct rtc_time * tm)
826 * oscillators and the precision with which the timebase frequency 946 * oscillators and the precision with which the timebase frequency
827 * is measured but does not harm. 947 * is measured but does not harm.
828 */ 948 */
829unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) { 949unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
950{
830 unsigned mlt=0, tmp, err; 951 unsigned mlt=0, tmp, err;
831 /* No concern for performance, it's done once: use a stupid 952 /* No concern for performance, it's done once: use a stupid
832 * but safe and compact method to find the multiplier. 953 * but safe and compact method to find the multiplier.
833 */ 954 */
834 955
835 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) { 956 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
836 if (mulhwu(inscale, mlt|tmp) < outscale) mlt|=tmp; 957 if (mulhwu(inscale, mlt|tmp) < outscale)
958 mlt |= tmp;
837 } 959 }
838 960
839 /* We might still be off by 1 for the best approximation. 961 /* We might still be off by 1 for the best approximation.
@@ -843,39 +965,41 @@ unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale) {
843 * some might have been forgotten in the test however. 965 * some might have been forgotten in the test however.
844 */ 966 */
845 967
846 err = inscale*(mlt+1); 968 err = inscale * (mlt+1);
847 if (err <= inscale/2) mlt++; 969 if (err <= inscale/2)
970 mlt++;
848 return mlt; 971 return mlt;
849 } 972}
850 973
851/* 974/*
852 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit 975 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
853 * result. 976 * result.
854 */ 977 */
855 978void div128_by_32(u64 dividend_high, u64 dividend_low,
856void div128_by_32( unsigned long dividend_high, unsigned long dividend_low, 979 unsigned divisor, struct div_result *dr)
857 unsigned divisor, struct div_result *dr )
858{ 980{
859 unsigned long a,b,c,d, w,x,y,z, ra,rb,rc; 981 unsigned long a, b, c, d;
982 unsigned long w, x, y, z;
983 u64 ra, rb, rc;
860 984
861 a = dividend_high >> 32; 985 a = dividend_high >> 32;
862 b = dividend_high & 0xffffffff; 986 b = dividend_high & 0xffffffff;
863 c = dividend_low >> 32; 987 c = dividend_low >> 32;
864 d = dividend_low & 0xffffffff; 988 d = dividend_low & 0xffffffff;
865 989
866 w = a/divisor; 990 w = a / divisor;
867 ra = (a - (w * divisor)) << 32; 991 ra = ((u64)(a - (w * divisor)) << 32) + b;
868 992
869 x = (ra + b)/divisor; 993 rb = ((u64) do_div(ra, divisor) << 32) + c;
870 rb = ((ra + b) - (x * divisor)) << 32; 994 x = ra;
871 995
872 y = (rb + c)/divisor; 996 rc = ((u64) do_div(rb, divisor) << 32) + d;
873 rc = ((rb + c) - (y * divisor)) << 32; 997 y = rb;
874 998
875 z = (rc + d)/divisor; 999 do_div(rc, divisor);
1000 z = rc;
876 1001
877 dr->result_high = (w << 32) + x; 1002 dr->result_high = ((u64)w << 32) + x;
878 dr->result_low = (y << 32) + z; 1003 dr->result_low = ((u64)y << 32) + z;
879 1004
880} 1005}
881
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
new file mode 100644
index 000000000000..5d638ecddbd0
--- /dev/null
+++ b/arch/powerpc/kernel/traps.c
@@ -0,0 +1,1101 @@
1/*
2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@samba.org)
11 */
12
13/*
14 * This file handles the architecture-dependent parts of hardware exceptions
15 */
16
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/a.out.h>
28#include <linux/interrupt.h>
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/prctl.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34
35#include <asm/kdebug.h>
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/machdep.h>
41#include <asm/rtas.h>
42#include <asm/xmon.h>
43#include <asm/pmc.h>
44#ifdef CONFIG_PPC32
45#include <asm/reg.h>
46#endif
47#ifdef CONFIG_PMAC_BACKLIGHT
48#include <asm/backlight.h>
49#endif
50#ifdef CONFIG_PPC64
51#include <asm/firmware.h>
52#include <asm/processor.h>
53#include <asm/systemcfg.h>
54#endif
55
56#ifdef CONFIG_PPC64 /* XXX */
57#define _IO_BASE pci_io_base
58#endif
59
60#ifdef CONFIG_DEBUGGER
61int (*__debugger)(struct pt_regs *regs);
62int (*__debugger_ipi)(struct pt_regs *regs);
63int (*__debugger_bpt)(struct pt_regs *regs);
64int (*__debugger_sstep)(struct pt_regs *regs);
65int (*__debugger_iabr_match)(struct pt_regs *regs);
66int (*__debugger_dabr_match)(struct pt_regs *regs);
67int (*__debugger_fault_handler)(struct pt_regs *regs);
68
69EXPORT_SYMBOL(__debugger);
70EXPORT_SYMBOL(__debugger_ipi);
71EXPORT_SYMBOL(__debugger_bpt);
72EXPORT_SYMBOL(__debugger_sstep);
73EXPORT_SYMBOL(__debugger_iabr_match);
74EXPORT_SYMBOL(__debugger_dabr_match);
75EXPORT_SYMBOL(__debugger_fault_handler);
76#endif
77
78struct notifier_block *powerpc_die_chain;
79static DEFINE_SPINLOCK(die_notifier_lock);
80
81int register_die_notifier(struct notifier_block *nb)
82{
83 int err = 0;
84 unsigned long flags;
85
86 spin_lock_irqsave(&die_notifier_lock, flags);
87 err = notifier_chain_register(&powerpc_die_chain, nb);
88 spin_unlock_irqrestore(&die_notifier_lock, flags);
89 return err;
90}
91
92/*
93 * Trap & Exception support
94 */
95
96static DEFINE_SPINLOCK(die_lock);
97
98int die(const char *str, struct pt_regs *regs, long err)
99{
100 static int die_counter;
101 int nl = 0;
102
103 if (debugger(regs))
104 return 1;
105
106 console_verbose();
107 spin_lock_irq(&die_lock);
108 bust_spinlocks(1);
109#ifdef CONFIG_PMAC_BACKLIGHT
110 if (_machine == _MACH_Pmac) {
111 set_backlight_enable(1);
112 set_backlight_level(BACKLIGHT_MAX);
113 }
114#endif
115 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
116#ifdef CONFIG_PREEMPT
117 printk("PREEMPT ");
118 nl = 1;
119#endif
120#ifdef CONFIG_SMP
121 printk("SMP NR_CPUS=%d ", NR_CPUS);
122 nl = 1;
123#endif
124#ifdef CONFIG_DEBUG_PAGEALLOC
125 printk("DEBUG_PAGEALLOC ");
126 nl = 1;
127#endif
128#ifdef CONFIG_NUMA
129 printk("NUMA ");
130 nl = 1;
131#endif
132#ifdef CONFIG_PPC64
133 switch (systemcfg->platform) {
134 case PLATFORM_PSERIES:
135 printk("PSERIES ");
136 nl = 1;
137 break;
138 case PLATFORM_PSERIES_LPAR:
139 printk("PSERIES LPAR ");
140 nl = 1;
141 break;
142 case PLATFORM_ISERIES_LPAR:
143 printk("ISERIES LPAR ");
144 nl = 1;
145 break;
146 case PLATFORM_POWERMAC:
147 printk("POWERMAC ");
148 nl = 1;
149 break;
150 case PLATFORM_BPA:
151 printk("BPA ");
152 nl = 1;
153 break;
154 }
155#endif
156 if (nl)
157 printk("\n");
158 print_modules();
159 show_regs(regs);
160 bust_spinlocks(0);
161 spin_unlock_irq(&die_lock);
162
163 if (in_interrupt())
164 panic("Fatal exception in interrupt");
165
166 if (panic_on_oops) {
167#ifdef CONFIG_PPC64
168 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
169 ssleep(5);
170#endif
171 panic("Fatal exception");
172 }
173 do_exit(err);
174
175 return 0;
176}
177
178void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
179{
180 siginfo_t info;
181
182 if (!user_mode(regs)) {
183 if (die("Exception in kernel mode", regs, signr))
184 return;
185 }
186
187 memset(&info, 0, sizeof(info));
188 info.si_signo = signr;
189 info.si_code = code;
190 info.si_addr = (void __user *) addr;
191 force_sig_info(signr, &info, current);
192
193 /*
194 * Init gets no signals that it doesn't have a handler for.
195 * That's all very well, but if it has caused a synchronous
196 * exception and we ignore the resulting signal, it will just
197 * generate the same exception over and over again and we get
198 * nowhere. Better to kill it and let the kernel panic.
199 */
200 if (current->pid == 1) {
201 __sighandler_t handler;
202
203 spin_lock_irq(&current->sighand->siglock);
204 handler = current->sighand->action[signr-1].sa.sa_handler;
205 spin_unlock_irq(&current->sighand->siglock);
206 if (handler == SIG_DFL) {
207 /* init has generated a synchronous exception
208 and it doesn't have a handler for the signal */
209 printk(KERN_CRIT "init has generated signal %d "
210 "but has no handler for it\n", signr);
211 do_exit(signr);
212 }
213 }
214}
215
216#ifdef CONFIG_PPC64
217void system_reset_exception(struct pt_regs *regs)
218{
219 /* See if any machine dependent calls */
220 if (ppc_md.system_reset_exception)
221 ppc_md.system_reset_exception(regs);
222
223 die("System Reset", regs, SIGABRT);
224
225 /* Must die if the interrupt is not recoverable */
226 if (!(regs->msr & MSR_RI))
227 panic("Unrecoverable System Reset");
228
229 /* What should we do here? We could issue a shutdown or hard reset. */
230}
231#endif
232
233/*
234 * I/O accesses can cause machine checks on powermacs.
235 * Check if the NIP corresponds to the address of a sync
236 * instruction for which there is an entry in the exception
237 * table.
238 * Note that the 601 only takes a machine check on TEA
239 * (transfer error ack) signal assertion, and does not
240 * set any of the top 16 bits of SRR1.
241 * -- paulus.
242 */
243static inline int check_io_access(struct pt_regs *regs)
244{
245#ifdef CONFIG_PPC_PMAC
246 unsigned long msr = regs->msr;
247 const struct exception_table_entry *entry;
248 unsigned int *nip = (unsigned int *)regs->nip;
249
250 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
251 && (entry = search_exception_tables(regs->nip)) != NULL) {
252 /*
253 * Check that it's a sync instruction, or somewhere
254 * in the twi; isync; nop sequence that inb/inw/inl uses.
255 * As the address is in the exception table
256 * we should be able to read the instr there.
257 * For the debug message, we look at the preceding
258 * load or store.
259 */
260 if (*nip == 0x60000000) /* nop */
261 nip -= 2;
262 else if (*nip == 0x4c00012c) /* isync */
263 --nip;
264 if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
265 /* sync or twi */
266 unsigned int rb;
267
268 --nip;
269 rb = (*nip >> 11) & 0x1f;
270 printk(KERN_DEBUG "%s bad port %lx at %p\n",
271 (*nip & 0x100)? "OUT to": "IN from",
272 regs->gpr[rb] - _IO_BASE, nip);
273 regs->msr |= MSR_RI;
274 regs->nip = entry->fixup;
275 return 1;
276 }
277 }
278#endif /* CONFIG_PPC_PMAC */
279 return 0;
280}
281
282#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
283/* On 4xx, the reason for the machine check or program exception
284 is in the ESR. */
285#define get_reason(regs) ((regs)->dsisr)
286#ifndef CONFIG_FSL_BOOKE
287#define get_mc_reason(regs) ((regs)->dsisr)
288#else
289#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
290#endif
291#define REASON_FP ESR_FP
292#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
293#define REASON_PRIVILEGED ESR_PPR
294#define REASON_TRAP ESR_PTR
295
296/* single-step stuff */
297#define single_stepping(regs) (current->thread.dbcr0 & DBCR0_IC)
298#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
299
300#else
301/* On non-4xx, the reason for the machine check or program
302 exception is in the MSR. */
303#define get_reason(regs) ((regs)->msr)
304#define get_mc_reason(regs) ((regs)->msr)
305#define REASON_FP 0x100000
306#define REASON_ILLEGAL 0x80000
307#define REASON_PRIVILEGED 0x40000
308#define REASON_TRAP 0x20000
309
310#define single_stepping(regs) ((regs)->msr & MSR_SE)
311#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
312#endif
313
314/*
315 * This is "fall-back" implementation for configurations
316 * which don't provide platform-specific machine check info
317 */
318void __attribute__ ((weak))
319platform_machine_check(struct pt_regs *regs)
320{
321}
322
323void machine_check_exception(struct pt_regs *regs)
324{
325#ifdef CONFIG_PPC64
326 int recover = 0;
327
328 /* See if any machine dependent calls */
329 if (ppc_md.machine_check_exception)
330 recover = ppc_md.machine_check_exception(regs);
331
332 if (recover)
333 return;
334#else
335 unsigned long reason = get_mc_reason(regs);
336
337 if (user_mode(regs)) {
338 regs->msr |= MSR_RI;
339 _exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
340 return;
341 }
342
343#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
344 /* the qspan pci read routines can cause machine checks -- Cort */
345 bad_page_fault(regs, regs->dar, SIGBUS);
346 return;
347#endif
348
349 if (debugger_fault_handler(regs)) {
350 regs->msr |= MSR_RI;
351 return;
352 }
353
354 if (check_io_access(regs))
355 return;
356
357#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
358 if (reason & ESR_IMCP) {
359 printk("Instruction");
360 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
361 } else
362 printk("Data");
363 printk(" machine check in kernel mode.\n");
364#elif defined(CONFIG_440A)
365 printk("Machine check in kernel mode.\n");
366 if (reason & ESR_IMCP){
367 printk("Instruction Synchronous Machine Check exception\n");
368 mtspr(SPRN_ESR, reason & ~ESR_IMCP);
369 }
370 else {
371 u32 mcsr = mfspr(SPRN_MCSR);
372 if (mcsr & MCSR_IB)
373 printk("Instruction Read PLB Error\n");
374 if (mcsr & MCSR_DRB)
375 printk("Data Read PLB Error\n");
376 if (mcsr & MCSR_DWB)
377 printk("Data Write PLB Error\n");
378 if (mcsr & MCSR_TLBP)
379 printk("TLB Parity Error\n");
380 if (mcsr & MCSR_ICP){
381 flush_instruction_cache();
382 printk("I-Cache Parity Error\n");
383 }
384 if (mcsr & MCSR_DCSP)
385 printk("D-Cache Search Parity Error\n");
386 if (mcsr & MCSR_DCFP)
387 printk("D-Cache Flush Parity Error\n");
388 if (mcsr & MCSR_IMPE)
389 printk("Machine Check exception is imprecise\n");
390
391 /* Clear MCSR */
392 mtspr(SPRN_MCSR, mcsr);
393 }
394#elif defined (CONFIG_E500)
395 printk("Machine check in kernel mode.\n");
396 printk("Caused by (from MCSR=%lx): ", reason);
397
398 if (reason & MCSR_MCP)
399 printk("Machine Check Signal\n");
400 if (reason & MCSR_ICPERR)
401 printk("Instruction Cache Parity Error\n");
402 if (reason & MCSR_DCP_PERR)
403 printk("Data Cache Push Parity Error\n");
404 if (reason & MCSR_DCPERR)
405 printk("Data Cache Parity Error\n");
406 if (reason & MCSR_GL_CI)
407 printk("Guarded Load or Cache-Inhibited stwcx.\n");
408 if (reason & MCSR_BUS_IAERR)
409 printk("Bus - Instruction Address Error\n");
410 if (reason & MCSR_BUS_RAERR)
411 printk("Bus - Read Address Error\n");
412 if (reason & MCSR_BUS_WAERR)
413 printk("Bus - Write Address Error\n");
414 if (reason & MCSR_BUS_IBERR)
415 printk("Bus - Instruction Data Error\n");
416 if (reason & MCSR_BUS_RBERR)
417 printk("Bus - Read Data Bus Error\n");
418 if (reason & MCSR_BUS_WBERR)
419 printk("Bus - Read Data Bus Error\n");
420 if (reason & MCSR_BUS_IPERR)
421 printk("Bus - Instruction Parity Error\n");
422 if (reason & MCSR_BUS_RPERR)
423 printk("Bus - Read Parity Error\n");
424#elif defined (CONFIG_E200)
425 printk("Machine check in kernel mode.\n");
426 printk("Caused by (from MCSR=%lx): ", reason);
427
428 if (reason & MCSR_MCP)
429 printk("Machine Check Signal\n");
430 if (reason & MCSR_CP_PERR)
431 printk("Cache Push Parity Error\n");
432 if (reason & MCSR_CPERR)
433 printk("Cache Parity Error\n");
434 if (reason & MCSR_EXCP_ERR)
435 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
436 if (reason & MCSR_BUS_IRERR)
437 printk("Bus - Read Bus Error on instruction fetch\n");
438 if (reason & MCSR_BUS_DRERR)
439 printk("Bus - Read Bus Error on data load\n");
440 if (reason & MCSR_BUS_WRERR)
441 printk("Bus - Write Bus Error on buffered store or cache line push\n");
442#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
443 printk("Machine check in kernel mode.\n");
444 printk("Caused by (from SRR1=%lx): ", reason);
445 switch (reason & 0x601F0000) {
446 case 0x80000:
447 printk("Machine check signal\n");
448 break;
449 case 0: /* for 601 */
450 case 0x40000:
451 case 0x140000: /* 7450 MSS error and TEA */
452 printk("Transfer error ack signal\n");
453 break;
454 case 0x20000:
455 printk("Data parity error signal\n");
456 break;
457 case 0x10000:
458 printk("Address parity error signal\n");
459 break;
460 case 0x20000000:
461 printk("L1 Data Cache error\n");
462 break;
463 case 0x40000000:
464 printk("L1 Instruction Cache error\n");
465 break;
466 case 0x00100000:
467 printk("L2 data cache parity error\n");
468 break;
469 default:
470 printk("Unknown values in msr\n");
471 }
472#endif /* CONFIG_4xx */
473
474 /*
475 * Optional platform-provided routine to print out
476 * additional info, e.g. bus error registers.
477 */
478 platform_machine_check(regs);
479#endif /* CONFIG_PPC64 */
480
481 if (debugger_fault_handler(regs))
482 return;
483 die("Machine check", regs, SIGBUS);
484
485 /* Must die if the interrupt is not recoverable */
486 if (!(regs->msr & MSR_RI))
487 panic("Unrecoverable Machine check");
488}
489
490void SMIException(struct pt_regs *regs)
491{
492 die("System Management Interrupt", regs, SIGABRT);
493}
494
495void unknown_exception(struct pt_regs *regs)
496{
497 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
498 regs->nip, regs->msr, regs->trap);
499
500 _exception(SIGTRAP, regs, 0, 0);
501}
502
503void instruction_breakpoint_exception(struct pt_regs *regs)
504{
505 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
506 5, SIGTRAP) == NOTIFY_STOP)
507 return;
508 if (debugger_iabr_match(regs))
509 return;
510 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
511}
512
513void RunModeException(struct pt_regs *regs)
514{
515 _exception(SIGTRAP, regs, 0, 0);
516}
517
518void __kprobes single_step_exception(struct pt_regs *regs)
519{
520 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
521
522 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
523 5, SIGTRAP) == NOTIFY_STOP)
524 return;
525 if (debugger_sstep(regs))
526 return;
527
528 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
529}
530
531/*
532 * After we have successfully emulated an instruction, we have to
533 * check if the instruction was being single-stepped, and if so,
534 * pretend we got a single-step exception. This was pointed out
535 * by Kumar Gala. -- paulus
536 */
537static void emulate_single_step(struct pt_regs *regs)
538{
539 if (single_stepping(regs)) {
540 clear_single_step(regs);
541 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
542 }
543}
544
545static void parse_fpe(struct pt_regs *regs)
546{
547 int code = 0;
548 unsigned long fpscr;
549
550 flush_fp_to_thread(current);
551
552 fpscr = current->thread.fpscr.val;
553
554 /* Invalid operation */
555 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
556 code = FPE_FLTINV;
557
558 /* Overflow */
559 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
560 code = FPE_FLTOVF;
561
562 /* Underflow */
563 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
564 code = FPE_FLTUND;
565
566 /* Divide by zero */
567 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
568 code = FPE_FLTDIV;
569
570 /* Inexact result */
571 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
572 code = FPE_FLTRES;
573
574 _exception(SIGFPE, regs, code, regs->nip);
575}
576
577/*
578 * Illegal instruction emulation support. Originally written to
579 * provide the PVR to user applications using the mfspr rd, PVR.
580 * Return non-zero if we can't emulate, or -EFAULT if the associated
581 * memory access caused an access fault. Return zero on success.
582 *
583 * There are a couple of ways to do this, either "decode" the instruction
584 * or directly match lots of bits. In this case, matching lots of
585 * bits is faster and easier.
586 *
587 */
588#define INST_MFSPR_PVR 0x7c1f42a6
589#define INST_MFSPR_PVR_MASK 0xfc1fffff
590
591#define INST_DCBA 0x7c0005ec
592#define INST_DCBA_MASK 0x7c0007fe
593
594#define INST_MCRXR 0x7c000400
595#define INST_MCRXR_MASK 0x7c0007fe
596
597#define INST_STRING 0x7c00042a
598#define INST_STRING_MASK 0x7c0007fe
599#define INST_STRING_GEN_MASK 0x7c00067e
600#define INST_LSWI 0x7c0004aa
601#define INST_LSWX 0x7c00042a
602#define INST_STSWI 0x7c0005aa
603#define INST_STSWX 0x7c00052a
604
605static int emulate_string_inst(struct pt_regs *regs, u32 instword)
606{
607 u8 rT = (instword >> 21) & 0x1f;
608 u8 rA = (instword >> 16) & 0x1f;
609 u8 NB_RB = (instword >> 11) & 0x1f;
610 u32 num_bytes;
611 unsigned long EA;
612 int pos = 0;
613
614 /* Early out if we are an invalid form of lswx */
615 if ((instword & INST_STRING_MASK) == INST_LSWX)
616 if ((rT == rA) || (rT == NB_RB))
617 return -EINVAL;
618
619 EA = (rA == 0) ? 0 : regs->gpr[rA];
620
621 switch (instword & INST_STRING_MASK) {
622 case INST_LSWX:
623 case INST_STSWX:
624 EA += NB_RB;
625 num_bytes = regs->xer & 0x7f;
626 break;
627 case INST_LSWI:
628 case INST_STSWI:
629 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
630 break;
631 default:
632 return -EINVAL;
633 }
634
635 while (num_bytes != 0)
636 {
637 u8 val;
638 u32 shift = 8 * (3 - (pos & 0x3));
639
640 switch ((instword & INST_STRING_MASK)) {
641 case INST_LSWX:
642 case INST_LSWI:
643 if (get_user(val, (u8 __user *)EA))
644 return -EFAULT;
645 /* first time updating this reg,
646 * zero it out */
647 if (pos == 0)
648 regs->gpr[rT] = 0;
649 regs->gpr[rT] |= val << shift;
650 break;
651 case INST_STSWI:
652 case INST_STSWX:
653 val = regs->gpr[rT] >> shift;
654 if (put_user(val, (u8 __user *)EA))
655 return -EFAULT;
656 break;
657 }
658 /* move EA to next address */
659 EA += 1;
660 num_bytes--;
661
662 /* manage our position within the register */
663 if (++pos == 4) {
664 pos = 0;
665 if (++rT == 32)
666 rT = 0;
667 }
668 }
669
670 return 0;
671}
672
673static int emulate_instruction(struct pt_regs *regs)
674{
675 u32 instword;
676 u32 rd;
677
678 if (!user_mode(regs))
679 return -EINVAL;
680 CHECK_FULL_REGS(regs);
681
682 if (get_user(instword, (u32 __user *)(regs->nip)))
683 return -EFAULT;
684
685 /* Emulate the mfspr rD, PVR. */
686 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
687 rd = (instword >> 21) & 0x1f;
688 regs->gpr[rd] = mfspr(SPRN_PVR);
689 return 0;
690 }
691
692 /* Emulating the dcba insn is just a no-op. */
693 if ((instword & INST_DCBA_MASK) == INST_DCBA)
694 return 0;
695
696 /* Emulate the mcrxr insn. */
697 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
698 int shift = (instword >> 21) & 0x1c;
699 unsigned long msk = 0xf0000000UL >> shift;
700
701 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
702 regs->xer &= ~0xf0000000UL;
703 return 0;
704 }
705
706 /* Emulate load/store string insn. */
707 if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
708 return emulate_string_inst(regs, instword);
709
710 return -EINVAL;
711}
712
713/*
714 * Look through the list of trap instructions that are used for BUG(),
715 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
716 * that the exception was caused by a trap instruction of some kind.
717 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
718 * otherwise.
719 */
720extern struct bug_entry __start___bug_table[], __stop___bug_table[];
721
722#ifndef CONFIG_MODULES
723#define module_find_bug(x) NULL
724#endif
725
726struct bug_entry *find_bug(unsigned long bugaddr)
727{
728 struct bug_entry *bug;
729
730 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
731 if (bugaddr == bug->bug_addr)
732 return bug;
733 return module_find_bug(bugaddr);
734}
735
736static int check_bug_trap(struct pt_regs *regs)
737{
738 struct bug_entry *bug;
739 unsigned long addr;
740
741 if (regs->msr & MSR_PR)
742 return 0; /* not in kernel */
743 addr = regs->nip; /* address of trap instruction */
744 if (addr < PAGE_OFFSET)
745 return 0;
746 bug = find_bug(regs->nip);
747 if (bug == NULL)
748 return 0;
749 if (bug->line & BUG_WARNING_TRAP) {
750 /* this is a WARN_ON rather than BUG/BUG_ON */
751#ifdef CONFIG_XMON
752 xmon_printf(KERN_ERR "Badness in %s at %s:%d\n",
753 bug->function, bug->file,
754 bug->line & ~BUG_WARNING_TRAP);
755#endif /* CONFIG_XMON */
756 printk(KERN_ERR "Badness in %s at %s:%d\n",
757 bug->function, bug->file,
758 bug->line & ~BUG_WARNING_TRAP);
759 dump_stack();
760 return 1;
761 }
762#ifdef CONFIG_XMON
763 xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
764 bug->function, bug->file, bug->line);
765 xmon(regs);
766#endif /* CONFIG_XMON */
767 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
768 bug->function, bug->file, bug->line);
769
770 return 0;
771}
772
773void __kprobes program_check_exception(struct pt_regs *regs)
774{
775 unsigned int reason = get_reason(regs);
776 extern int do_mathemu(struct pt_regs *regs);
777
778#ifdef CONFIG_MATH_EMULATION
779 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
780 * but there seems to be a hardware bug on the 405GP (RevD)
781 * that means ESR is sometimes set incorrectly - either to
782 * ESR_DST (!?) or 0. In the process of chasing this with the
783 * hardware people - not sure if it can happen on any illegal
784 * instruction or only on FP instructions, whether there is a
785 * pattern to occurences etc. -dgibson 31/Mar/2003 */
786 if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
787 emulate_single_step(regs);
788 return;
789 }
790#endif /* CONFIG_MATH_EMULATION */
791
792 if (reason & REASON_FP) {
793 /* IEEE FP exception */
794 parse_fpe(regs);
795 return;
796 }
797 if (reason & REASON_TRAP) {
798 /* trap exception */
799 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
800 == NOTIFY_STOP)
801 return;
802 if (debugger_bpt(regs))
803 return;
804 if (check_bug_trap(regs)) {
805 regs->nip += 4;
806 return;
807 }
808 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
809 return;
810 }
811
812 /* Try to emulate it if we should. */
813 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
814 switch (emulate_instruction(regs)) {
815 case 0:
816 regs->nip += 4;
817 emulate_single_step(regs);
818 return;
819 case -EFAULT:
820 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
821 return;
822 }
823 }
824
825 if (reason & REASON_PRIVILEGED)
826 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
827 else
828 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
829}
830
831void alignment_exception(struct pt_regs *regs)
832{
833 int fixed;
834
835 fixed = fix_alignment(regs);
836
837 if (fixed == 1) {
838 regs->nip += 4; /* skip over emulated instruction */
839 emulate_single_step(regs);
840 return;
841 }
842
843 /* Operand address was bad */
844 if (fixed == -EFAULT) {
845 if (user_mode(regs))
846 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
847 else
848 /* Search exception table */
849 bad_page_fault(regs, regs->dar, SIGSEGV);
850 return;
851 }
852 _exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
853}
854
855void StackOverflow(struct pt_regs *regs)
856{
857 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
858 current, regs->gpr[1]);
859 debugger(regs);
860 show_regs(regs);
861 panic("kernel stack overflow");
862}
863
864void nonrecoverable_exception(struct pt_regs *regs)
865{
866 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
867 regs->nip, regs->msr);
868 debugger(regs);
869 die("nonrecoverable exception", regs, SIGKILL);
870}
871
872void trace_syscall(struct pt_regs *regs)
873{
874 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
875 current, current->pid, regs->nip, regs->link, regs->gpr[0],
876 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
877}
878
879void kernel_fp_unavailable_exception(struct pt_regs *regs)
880{
881 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
882 "%lx at %lx\n", regs->trap, regs->nip);
883 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
884}
885
886void altivec_unavailable_exception(struct pt_regs *regs)
887{
888#if !defined(CONFIG_ALTIVEC)
889 if (user_mode(regs)) {
890 /* A user program has executed an altivec instruction,
891 but this kernel doesn't support altivec. */
892 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
893 return;
894 }
895#endif
896 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
897 "%lx at %lx\n", regs->trap, regs->nip);
898 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
899}
900
901#ifdef CONFIG_PPC64
902extern perf_irq_t perf_irq;
903#endif
904
905#if defined(CONFIG_PPC64) || defined(CONFIG_E500)
906void performance_monitor_exception(struct pt_regs *regs)
907{
908 perf_irq(regs);
909}
910#endif
911
912#ifdef CONFIG_8xx
913void SoftwareEmulation(struct pt_regs *regs)
914{
915 extern int do_mathemu(struct pt_regs *);
916 extern int Soft_emulate_8xx(struct pt_regs *);
917 int errcode;
918
919 CHECK_FULL_REGS(regs);
920
921 if (!user_mode(regs)) {
922 debugger(regs);
923 die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
924 }
925
926#ifdef CONFIG_MATH_EMULATION
927 errcode = do_mathemu(regs);
928#else
929 errcode = Soft_emulate_8xx(regs);
930#endif
931 if (errcode) {
932 if (errcode > 0)
933 _exception(SIGFPE, regs, 0, 0);
934 else if (errcode == -EFAULT)
935 _exception(SIGSEGV, regs, 0, 0);
936 else
937 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
938 } else
939 emulate_single_step(regs);
940}
941#endif /* CONFIG_8xx */
942
943#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
944
945void DebugException(struct pt_regs *regs, unsigned long debug_status)
946{
947 if (debug_status & DBSR_IC) { /* instruction completion */
948 regs->msr &= ~MSR_DE;
949 if (user_mode(regs)) {
950 current->thread.dbcr0 &= ~DBCR0_IC;
951 } else {
952 /* Disable instruction completion */
953 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
954 /* Clear the instruction completion event */
955 mtspr(SPRN_DBSR, DBSR_IC);
956 if (debugger_sstep(regs))
957 return;
958 }
959 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
960 }
961}
962#endif /* CONFIG_4xx || CONFIG_BOOKE */
963
964#if !defined(CONFIG_TAU_INT)
965void TAUException(struct pt_regs *regs)
966{
967 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
968 regs->nip, regs->msr, regs->trap, print_tainted());
969}
970#endif /* CONFIG_INT_TAU */
971
972#ifdef CONFIG_ALTIVEC
973void altivec_assist_exception(struct pt_regs *regs)
974{
975 int err;
976
977 if (!user_mode(regs)) {
978 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
979 " at %lx\n", regs->nip);
980 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
981 }
982
983 flush_altivec_to_thread(current);
984
985 err = emulate_altivec(regs);
986 if (err == 0) {
987 regs->nip += 4; /* skip emulated instruction */
988 emulate_single_step(regs);
989 return;
990 }
991
992 if (err == -EFAULT) {
993 /* got an error reading the instruction */
994 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
995 } else {
996 /* didn't recognize the instruction */
997 /* XXX quick hack for now: set the non-Java bit in the VSCR */
998 if (printk_ratelimit())
999 printk(KERN_ERR "Unrecognized altivec instruction "
1000 "in %s at %lx\n", current->comm, regs->nip);
1001 current->thread.vscr.u[3] |= 0x10000;
1002 }
1003}
1004#endif /* CONFIG_ALTIVEC */
1005
1006#ifdef CONFIG_FSL_BOOKE
1007void CacheLockingException(struct pt_regs *regs, unsigned long address,
1008 unsigned long error_code)
1009{
1010 /* We treat cache locking instructions from the user
1011 * as priv ops, in the future we could try to do
1012 * something smarter
1013 */
1014 if (error_code & (ESR_DLK|ESR_ILK))
1015 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1016 return;
1017}
1018#endif /* CONFIG_FSL_BOOKE */
1019
1020#ifdef CONFIG_SPE
1021void SPEFloatingPointException(struct pt_regs *regs)
1022{
1023 unsigned long spefscr;
1024 int fpexc_mode;
1025 int code = 0;
1026
1027 spefscr = current->thread.spefscr;
1028 fpexc_mode = current->thread.fpexc_mode;
1029
1030 /* Hardware does not neccessarily set sticky
1031 * underflow/overflow/invalid flags */
1032 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1033 code = FPE_FLTOVF;
1034 spefscr |= SPEFSCR_FOVFS;
1035 }
1036 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1037 code = FPE_FLTUND;
1038 spefscr |= SPEFSCR_FUNFS;
1039 }
1040 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1041 code = FPE_FLTDIV;
1042 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1043 code = FPE_FLTINV;
1044 spefscr |= SPEFSCR_FINVS;
1045 }
1046 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1047 code = FPE_FLTRES;
1048
1049 current->thread.spefscr = spefscr;
1050
1051 _exception(SIGFPE, regs, code, regs->nip);
1052 return;
1053}
1054#endif
1055
1056/*
1057 * We enter here if we get an unrecoverable exception, that is, one
1058 * that happened at a point where the RI (recoverable interrupt) bit
1059 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1060 * we therefore lost state by taking this exception.
1061 */
1062void unrecoverable_exception(struct pt_regs *regs)
1063{
1064 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1065 regs->trap, regs->nip);
1066 die("Unrecoverable exception", regs, SIGABRT);
1067}
1068
1069#ifdef CONFIG_BOOKE_WDT
1070/*
1071 * Default handler for a Watchdog exception,
1072 * spins until a reboot occurs
1073 */
1074void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1075{
1076 /* Generic WatchdogHandler, implement your own */
1077 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1078 return;
1079}
1080
1081void WatchdogException(struct pt_regs *regs)
1082{
1083 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1084 WatchdogHandler(regs);
1085}
1086#endif
1087
1088/*
1089 * We enter here if we discover during exception entry that we are
1090 * running in supervisor mode with a userspace value in the stack pointer.
1091 */
1092void kernel_bad_stack(struct pt_regs *regs)
1093{
1094 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1095 regs->gpr[1], regs->nip);
1096 die("Bad kernel stack pointer", regs, SIGABRT);
1097}
1098
1099void __init trap_init(void)
1100{
1101}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 604d0947cb20..604d0947cb20 100644
--- a/arch/ppc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
diff --git a/arch/ppc64/kernel/vector.S b/arch/powerpc/kernel/vector.S
index b79d33e4001e..66b3d03c5fa5 100644
--- a/arch/ppc64/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,11 +1,26 @@
1#include <linux/config.h>
1#include <asm/ppc_asm.h> 2#include <asm/ppc_asm.h>
2#include <asm/processor.h> 3#include <asm/reg.h>
3 4
4/* 5/*
5 * The routines below are in assembler so we can closely control the 6 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called 7 * usage of floating-point registers. These routines must be called
7 * with preempt disabled. 8 * with preempt disabled.
8 */ 9 */
10#ifdef CONFIG_PPC32
11 .data
12fpzero:
13 .long 0
14fpone:
15 .long 0x3f800000 /* 1.0 in single-precision FP */
16fphalf:
17 .long 0x3f000000 /* 0.5 in single-precision FP */
18
19#define LDCONST(fr, name) \
20 lis r11,name@ha; \
21 lfs fr,name@l(r11)
22#else
23
9 .section ".toc","aw" 24 .section ".toc","aw"
10fpzero: 25fpzero:
11 .tc FD_0_0[TC],0 26 .tc FD_0_0[TC],0
@@ -14,32 +29,42 @@ fpone:
14fphalf: 29fphalf:
15 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ 30 .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
16 31
32#define LDCONST(fr, name) \
33 lfd fr,name@toc(r2)
34#endif
35
17 .text 36 .text
18/* 37/*
19 * Internal routine to enable floating point and set FPSCR to 0. 38 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention. 39 * Don't call it from C; it doesn't use the normal calling convention.
21 */ 40 */
22fpenable: 41fpenable:
42#ifdef CONFIG_PPC32
43 stwu r1,-64(r1)
44#else
45 stdu r1,-64(r1)
46#endif
23 mfmsr r10 47 mfmsr r10
24 ori r11,r10,MSR_FP 48 ori r11,r10,MSR_FP
25 mtmsr r11 49 mtmsr r11
26 isync 50 isync
27 stfd fr31,-8(r1) 51 stfd fr0,24(r1)
28 stfd fr0,-16(r1) 52 stfd fr1,16(r1)
29 stfd fr1,-24(r1) 53 stfd fr31,8(r1)
54 LDCONST(fr1, fpzero)
30 mffs fr31 55 mffs fr31
31 lfd fr1,fpzero@toc(r2)
32 mtfsf 0xff,fr1 56 mtfsf 0xff,fr1
33 blr 57 blr
34 58
35fpdisable: 59fpdisable:
36 mtlr r12 60 mtlr r12
37 mtfsf 0xff,fr31 61 mtfsf 0xff,fr31
38 lfd fr1,-24(r1) 62 lfd fr31,8(r1)
39 lfd fr0,-16(r1) 63 lfd fr1,16(r1)
40 lfd fr31,-8(r1) 64 lfd fr0,24(r1)
41 mtmsr r10 65 mtmsr r10
42 isync 66 isync
67 addi r1,r1,64
43 blr 68 blr
44 69
45/* 70/*
@@ -82,7 +107,7 @@ _GLOBAL(vsubfp)
82_GLOBAL(vmaddfp) 107_GLOBAL(vmaddfp)
83 mflr r12 108 mflr r12
84 bl fpenable 109 bl fpenable
85 stfd fr2,-32(r1) 110 stfd fr2,32(r1)
86 li r0,4 111 li r0,4
87 mtctr r0 112 mtctr r0
88 li r7,0 113 li r7,0
@@ -93,7 +118,7 @@ _GLOBAL(vmaddfp)
93 stfsx fr0,r3,r7 118 stfsx fr0,r3,r7
94 addi r7,r7,4 119 addi r7,r7,4
95 bdnz 1b 120 bdnz 1b
96 lfd fr2,-32(r1) 121 lfd fr2,32(r1)
97 b fpdisable 122 b fpdisable
98 123
99/* 124/*
@@ -102,7 +127,7 @@ _GLOBAL(vmaddfp)
102_GLOBAL(vnmsubfp) 127_GLOBAL(vnmsubfp)
103 mflr r12 128 mflr r12
104 bl fpenable 129 bl fpenable
105 stfd fr2,-32(r1) 130 stfd fr2,32(r1)
106 li r0,4 131 li r0,4
107 mtctr r0 132 mtctr r0
108 li r7,0 133 li r7,0
@@ -113,7 +138,7 @@ _GLOBAL(vnmsubfp)
113 stfsx fr0,r3,r7 138 stfsx fr0,r3,r7
114 addi r7,r7,4 139 addi r7,r7,4
115 bdnz 1b 140 bdnz 1b
116 lfd fr2,-32(r1) 141 lfd fr2,32(r1)
117 b fpdisable 142 b fpdisable
118 143
119/* 144/*
@@ -124,7 +149,7 @@ _GLOBAL(vrefp)
124 mflr r12 149 mflr r12
125 bl fpenable 150 bl fpenable
126 li r0,4 151 li r0,4
127 lfd fr1,fpone@toc(r2) 152 LDCONST(fr1, fpone)
128 mtctr r0 153 mtctr r0
129 li r6,0 154 li r6,0
1301: lfsx fr0,r4,r6 1551: lfsx fr0,r4,r6
@@ -143,13 +168,13 @@ _GLOBAL(vrefp)
143_GLOBAL(vrsqrtefp) 168_GLOBAL(vrsqrtefp)
144 mflr r12 169 mflr r12
145 bl fpenable 170 bl fpenable
146 stfd fr2,-32(r1) 171 stfd fr2,32(r1)
147 stfd fr3,-40(r1) 172 stfd fr3,40(r1)
148 stfd fr4,-48(r1) 173 stfd fr4,48(r1)
149 stfd fr5,-56(r1) 174 stfd fr5,56(r1)
150 li r0,4 175 li r0,4
151 lfd fr4,fpone@toc(r2) 176 LDCONST(fr4, fpone)
152 lfd fr5,fphalf@toc(r2) 177 LDCONST(fr5, fphalf)
153 mtctr r0 178 mtctr r0
154 li r6,0 179 li r6,0
1551: lfsx fr0,r4,r6 1801: lfsx fr0,r4,r6
@@ -165,8 +190,8 @@ _GLOBAL(vrsqrtefp)
165 stfsx fr1,r3,r6 190 stfsx fr1,r3,r6
166 addi r6,r6,4 191 addi r6,r6,4
167 bdnz 1b 192 bdnz 1b
168 lfd fr5,-56(r1) 193 lfd fr5,56(r1)
169 lfd fr4,-48(r1) 194 lfd fr4,48(r1)
170 lfd fr3,-40(r1) 195 lfd fr3,40(r1)
171 lfd fr2,-32(r1) 196 lfd fr2,32(r1)
172 b fpdisable 197 b fpdisable
diff --git a/arch/ppc64/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 0e555b7a6587..97082a4203ad 100644
--- a/arch/ppc64/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -69,6 +69,16 @@ static int vio_bus_remove(struct device *dev)
69 return 1; 69 return 1;
70} 70}
71 71
72/* convert from struct device to struct vio_dev and pass to driver. */
73static void vio_bus_shutdown(struct device *dev)
74{
75 struct vio_dev *viodev = to_vio_dev(dev);
76 struct vio_driver *viodrv = to_vio_driver(dev->driver);
77
78 if (viodrv->shutdown)
79 viodrv->shutdown(viodev);
80}
81
72/** 82/**
73 * vio_register_driver: - Register a new vio driver 83 * vio_register_driver: - Register a new vio driver
74 * @drv: The vio_driver structure to be registered. 84 * @drv: The vio_driver structure to be registered.
@@ -76,13 +86,13 @@ static int vio_bus_remove(struct device *dev)
76int vio_register_driver(struct vio_driver *viodrv) 86int vio_register_driver(struct vio_driver *viodrv)
77{ 87{
78 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__, 88 printk(KERN_DEBUG "%s: driver %s registering\n", __FUNCTION__,
79 viodrv->name); 89 viodrv->driver.name);
80 90
81 /* fill in 'struct driver' fields */ 91 /* fill in 'struct driver' fields */
82 viodrv->driver.name = viodrv->name;
83 viodrv->driver.bus = &vio_bus_type; 92 viodrv->driver.bus = &vio_bus_type;
84 viodrv->driver.probe = vio_bus_probe; 93 viodrv->driver.probe = vio_bus_probe;
85 viodrv->driver.remove = vio_bus_remove; 94 viodrv->driver.remove = vio_bus_remove;
95 viodrv->driver.shutdown = vio_bus_shutdown;
86 96
87 return driver_register(&viodrv->driver); 97 return driver_register(&viodrv->driver);
88} 98}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..d4dfcfbce272
--- /dev/null
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -0,0 +1,279 @@
1#include <linux/config.h>
2#ifdef CONFIG_PPC64
3#include <asm/page.h>
4#else
5#define PAGE_SIZE 4096
6#endif
7#include <asm-generic/vmlinux.lds.h>
8
9#ifdef CONFIG_PPC64
10OUTPUT_ARCH(powerpc:common64)
11jiffies = jiffies_64;
12#else
13OUTPUT_ARCH(powerpc:common)
14jiffies = jiffies_64 + 4;
15#endif
16SECTIONS
17{
18 /* Sections to be discarded. */
19 /DISCARD/ : {
20 *(.exitcall.exit)
21 *(.exit.data)
22 }
23
24
25 /* Read-only sections, merged into text segment: */
26#ifdef CONFIG_PPC32
27 . = + SIZEOF_HEADERS;
28 .interp : { *(.interp) }
29 .hash : { *(.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .rel.text : { *(.rel.text) }
33 .rela.text : { *(.rela.text) }
34 .rel.data : { *(.rel.data) }
35 .rela.data : { *(.rela.data) }
36 .rel.rodata : { *(.rel.rodata) }
37 .rela.rodata : { *(.rela.rodata) }
38 .rel.got : { *(.rel.got) }
39 .rela.got : { *(.rela.got) }
40 .rel.ctors : { *(.rel.ctors) }
41 .rela.ctors : { *(.rela.ctors) }
42 .rel.dtors : { *(.rel.dtors) }
43 .rela.dtors : { *(.rela.dtors) }
44 .rel.bss : { *(.rel.bss) }
45 .rela.bss : { *(.rela.bss) }
46 .rel.plt : { *(.rel.plt) }
47 .rela.plt : { *(.rela.plt) }
48/* .init : { *(.init) } =0*/
49 .plt : { *(.plt) }
50#endif
51 .text : {
52 *(.text .text.*)
53 SCHED_TEXT
54 LOCK_TEXT
55 KPROBES_TEXT
56 *(.fixup)
57#ifdef CONFIG_PPC32
58 *(.got1)
59 __got2_start = .;
60 *(.got2)
61 __got2_end = .;
62#else
63 . = ALIGN(PAGE_SIZE);
64 _etext = .;
65#endif
66 }
67#ifdef CONFIG_PPC32
68 _etext = .;
69 PROVIDE (etext = .);
70
71 RODATA
72 .fini : { *(.fini) } =0
73 .ctors : { *(.ctors) }
74 .dtors : { *(.dtors) }
75
76 .fixup : { *(.fixup) }
77#endif
78
79 __ex_table : {
80 __start___ex_table = .;
81 *(__ex_table)
82 __stop___ex_table = .;
83 }
84
85 __bug_table : {
86 __start___bug_table = .;
87 *(__bug_table)
88 __stop___bug_table = .;
89 }
90
91#ifdef CONFIG_PPC64
92 __ftr_fixup : {
93 __start___ftr_fixup = .;
94 *(__ftr_fixup)
95 __stop___ftr_fixup = .;
96 }
97
98 RODATA
99#endif
100
101#ifdef CONFIG_PPC32
102 /* Read-write section, merged into data segment: */
103 . = ALIGN(PAGE_SIZE);
104 _sdata = .;
105 .data :
106 {
107 *(.data)
108 *(.data1)
109 *(.sdata)
110 *(.sdata2)
111 *(.got.plt) *(.got)
112 *(.dynamic)
113 CONSTRUCTORS
114 }
115
116 . = ALIGN(PAGE_SIZE);
117 __nosave_begin = .;
118 .data_nosave : { *(.data.nosave) }
119 . = ALIGN(PAGE_SIZE);
120 __nosave_end = .;
121
122 . = ALIGN(32);
123 .data.cacheline_aligned : { *(.data.cacheline_aligned) }
124
125 _edata = .;
126 PROVIDE (edata = .);
127
128 . = ALIGN(8192);
129 .data.init_task : { *(.data.init_task) }
130#endif
131
132 /* will be freed after init */
133 . = ALIGN(PAGE_SIZE);
134 __init_begin = .;
135 .init.text : {
136 _sinittext = .;
137 *(.init.text)
138 _einittext = .;
139 }
140#ifdef CONFIG_PPC32
141 /* .exit.text is discarded at runtime, not link time,
142 to deal with references from __bug_table */
143 .exit.text : { *(.exit.text) }
144#endif
145 .init.data : {
146 *(.init.data);
147 __vtop_table_begin = .;
148 *(.vtop_fixup);
149 __vtop_table_end = .;
150 __ptov_table_begin = .;
151 *(.ptov_fixup);
152 __ptov_table_end = .;
153 }
154
155 . = ALIGN(16);
156 .init.setup : {
157 __setup_start = .;
158 *(.init.setup)
159 __setup_end = .;
160 }
161
162 .initcall.init : {
163 __initcall_start = .;
164 *(.initcall1.init)
165 *(.initcall2.init)
166 *(.initcall3.init)
167 *(.initcall4.init)
168 *(.initcall5.init)
169 *(.initcall6.init)
170 *(.initcall7.init)
171 __initcall_end = .;
172 }
173
174 .con_initcall.init : {
175 __con_initcall_start = .;
176 *(.con_initcall.init)
177 __con_initcall_end = .;
178 }
179
180 SECURITY_INIT
181
182#ifdef CONFIG_PPC32
183 __start___ftr_fixup = .;
184 __ftr_fixup : { *(__ftr_fixup) }
185 __stop___ftr_fixup = .;
186#else
187 . = ALIGN(PAGE_SIZE);
188 .init.ramfs : {
189 __initramfs_start = .;
190 *(.init.ramfs)
191 __initramfs_end = .;
192 }
193#endif
194
195#ifdef CONFIG_PPC32
196 . = ALIGN(32);
197#endif
198 .data.percpu : {
199 __per_cpu_start = .;
200 *(.data.percpu)
201 __per_cpu_end = .;
202 }
203
204 . = ALIGN(PAGE_SIZE);
205#ifdef CONFIG_PPC64
206 . = ALIGN(16384);
207 __init_end = .;
208 /* freed after init ends here */
209
210 /* Read/write sections */
211 . = ALIGN(PAGE_SIZE);
212 . = ALIGN(16384);
213 _sdata = .;
214 /* The initial task and kernel stack */
215 .data.init_task : {
216 *(.data.init_task)
217 }
218
219 . = ALIGN(PAGE_SIZE);
220 .data.page_aligned : {
221 *(.data.page_aligned)
222 }
223
224 .data.cacheline_aligned : {
225 *(.data.cacheline_aligned)
226 }
227
228 .data : {
229 *(.data .data.rel* .toc1)
230 *(.branch_lt)
231 }
232
233 .opd : {
234 *(.opd)
235 }
236
237 .got : {
238 __toc_start = .;
239 *(.got)
240 *(.toc)
241 . = ALIGN(PAGE_SIZE);
242 _edata = .;
243 }
244
245 . = ALIGN(PAGE_SIZE);
246#else
247 __initramfs_start = .;
248 .init.ramfs : {
249 *(.init.ramfs)
250 }
251 __initramfs_end = .;
252
253 . = ALIGN(4096);
254 __init_end = .;
255
256 . = ALIGN(4096);
257 _sextratext = .;
258 _eextratext = .;
259
260 __bss_start = .;
261#endif
262
263 .bss : {
264 __bss_start = .;
265 *(.sbss) *(.scommon)
266 *(.dynbss)
267 *(.bss)
268 *(COMMON)
269 __bss_stop = .;
270 }
271
272#ifdef CONFIG_PPC64
273 . = ALIGN(PAGE_SIZE);
274#endif
275 _end = . ;
276#ifdef CONFIG_PPC32
277 PROVIDE (end = .);
278#endif
279}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
new file mode 100644
index 000000000000..e6b2be3bcec1
--- /dev/null
+++ b/arch/powerpc/lib/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for ppc-specific library files..
3#
4
5ifeq ($(CONFIG_PPC_MERGE),y)
6obj-y := string.o
7endif
8
9obj-y += strcase.o
10obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
11obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \
12 memcpy_64.o usercopy_64.o mem_64.o
13obj-$(CONFIG_PPC_ISERIES) += e2a.o
14obj-$(CONFIG_XMON) += sstep.o
15
16ifeq ($(CONFIG_PPC64),y)
17obj-$(CONFIG_SMP) += locks.o
18obj-$(CONFIG_DEBUG_KERNEL) += sstep.o
19endif
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
new file mode 100644
index 000000000000..7874e8a80455
--- /dev/null
+++ b/arch/powerpc/lib/checksum_32.S
@@ -0,0 +1,225 @@
1/*
2 * This file contains assembly-language implementations
3 * of IP-style 1's complement checksum routines.
4 *
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
13 */
14
15#include <linux/sys.h>
16#include <asm/processor.h>
17#include <asm/errno.h>
18#include <asm/ppc_asm.h>
19
20 .text
21
22/*
23 * ip_fast_csum(buf, len) -- Optimized for IP header
24 * len is in words and is always >= 5.
25 */
26_GLOBAL(ip_fast_csum)
27 lwz r0,0(r3)
28 lwzu r5,4(r3)
29 addic. r4,r4,-2
30 addc r0,r0,r5
31 mtctr r4
32 blelr-
331: lwzu r4,4(r3)
34 adde r0,r0,r4
35 bdnz 1b
36 addze r0,r0 /* add in final carry */
37 rlwinm r3,r0,16,0,31 /* fold two halves together */
38 add r3,r0,r3
39 not r3,r3
40 srwi r3,r3,16
41 blr
42
43/*
44 * Compute checksum of TCP or UDP pseudo-header:
45 * csum_tcpudp_magic(saddr, daddr, len, proto, sum)
46 */
47_GLOBAL(csum_tcpudp_magic)
48 rlwimi r5,r6,16,0,15 /* put proto in upper half of len */
49 addc r0,r3,r4 /* add 4 32-bit words together */
50 adde r0,r0,r5
51 adde r0,r0,r7
52 addze r0,r0 /* add in final carry */
53 rlwinm r3,r0,16,0,31 /* fold two halves together */
54 add r3,r0,r3
55 not r3,r3
56 srwi r3,r3,16
57 blr
58
59/*
60 * computes the checksum of a memory block at buff, length len,
61 * and adds in "sum" (32-bit)
62 *
63 * csum_partial(buff, len, sum)
64 */
65_GLOBAL(csum_partial)
66 addic r0,r5,0
67 subi r3,r3,4
68 srwi. r6,r4,2
69 beq 3f /* if we're doing < 4 bytes */
70 andi. r5,r3,2 /* Align buffer to longword boundary */
71 beq+ 1f
72 lhz r5,4(r3) /* do 2 bytes to get aligned */
73 addi r3,r3,2
74 subi r4,r4,2
75 addc r0,r0,r5
76 srwi. r6,r4,2 /* # words to do */
77 beq 3f
781: mtctr r6
792: lwzu r5,4(r3) /* the bdnz has zero overhead, so it should */
80 adde r0,r0,r5 /* be unnecessary to unroll this loop */
81 bdnz 2b
82 andi. r4,r4,3
833: cmpwi 0,r4,2
84 blt+ 4f
85 lhz r5,4(r3)
86 addi r3,r3,2
87 subi r4,r4,2
88 adde r0,r0,r5
894: cmpwi 0,r4,1
90 bne+ 5f
91 lbz r5,4(r3)
92 slwi r5,r5,8 /* Upper byte of word */
93 adde r0,r0,r5
945: addze r3,r0 /* add in final carry */
95 blr
96
97/*
98 * Computes the checksum of a memory block at src, length len,
99 * and adds in "sum" (32-bit), while copying the block to dst.
100 * If an access exception occurs on src or dst, it stores -EFAULT
101 * to *src_err or *dst_err respectively, and (for an error on
102 * src) zeroes the rest of dst.
103 *
104 * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
105 */
106_GLOBAL(csum_partial_copy_generic)
107 addic r0,r6,0
108 subi r3,r3,4
109 subi r4,r4,4
110 srwi. r6,r5,2
111 beq 3f /* if we're doing < 4 bytes */
112 andi. r9,r4,2 /* Align dst to longword boundary */
113 beq+ 1f
11481: lhz r6,4(r3) /* do 2 bytes to get aligned */
115 addi r3,r3,2
116 subi r5,r5,2
11791: sth r6,4(r4)
118 addi r4,r4,2
119 addc r0,r0,r6
120 srwi. r6,r5,2 /* # words to do */
121 beq 3f
1221: srwi. r6,r5,4 /* # groups of 4 words to do */
123 beq 10f
124 mtctr r6
12571: lwz r6,4(r3)
12672: lwz r9,8(r3)
12773: lwz r10,12(r3)
12874: lwzu r11,16(r3)
129 adde r0,r0,r6
13075: stw r6,4(r4)
131 adde r0,r0,r9
13276: stw r9,8(r4)
133 adde r0,r0,r10
13477: stw r10,12(r4)
135 adde r0,r0,r11
13678: stwu r11,16(r4)
137 bdnz 71b
13810: rlwinm. r6,r5,30,30,31 /* # words left to do */
139 beq 13f
140 mtctr r6
14182: lwzu r9,4(r3)
14292: stwu r9,4(r4)
143 adde r0,r0,r9
144 bdnz 82b
14513: andi. r5,r5,3
1463: cmpwi 0,r5,2
147 blt+ 4f
14883: lhz r6,4(r3)
149 addi r3,r3,2
150 subi r5,r5,2
15193: sth r6,4(r4)
152 addi r4,r4,2
153 adde r0,r0,r6
1544: cmpwi 0,r5,1
155 bne+ 5f
15684: lbz r6,4(r3)
15794: stb r6,4(r4)
158 slwi r6,r6,8 /* Upper byte of word */
159 adde r0,r0,r6
1605: addze r3,r0 /* add in final carry */
161 blr
162
163/* These shouldn't go in the fixup section, since that would
164 cause the ex_table addresses to get out of order. */
165
166src_error_4:
167 mfctr r6 /* update # bytes remaining from ctr */
168 rlwimi r5,r6,4,0,27
169 b 79f
170src_error_1:
171 li r6,0
172 subi r5,r5,2
17395: sth r6,4(r4)
174 addi r4,r4,2
17579: srwi. r6,r5,2
176 beq 3f
177 mtctr r6
178src_error_2:
179 li r6,0
18096: stwu r6,4(r4)
181 bdnz 96b
1823: andi. r5,r5,3
183 beq src_error
184src_error_3:
185 li r6,0
186 mtctr r5
187 addi r4,r4,3
18897: stbu r6,1(r4)
189 bdnz 97b
190src_error:
191 cmpwi 0,r7,0
192 beq 1f
193 li r6,-EFAULT
194 stw r6,0(r7)
1951: addze r3,r0
196 blr
197
198dst_error:
199 cmpwi 0,r8,0
200 beq 1f
201 li r6,-EFAULT
202 stw r6,0(r8)
2031: addze r3,r0
204 blr
205
206.section __ex_table,"a"
207 .long 81b,src_error_1
208 .long 91b,dst_error
209 .long 71b,src_error_4
210 .long 72b,src_error_4
211 .long 73b,src_error_4
212 .long 74b,src_error_4
213 .long 75b,dst_error
214 .long 76b,dst_error
215 .long 77b,dst_error
216 .long 78b,dst_error
217 .long 82b,src_error_2
218 .long 92b,dst_error
219 .long 83b,src_error_3
220 .long 93b,dst_error
221 .long 84b,src_error_3
222 .long 94b,dst_error
223 .long 95b,dst_error
224 .long 96b,dst_error
225 .long 97b,dst_error
diff --git a/arch/ppc64/lib/checksum.S b/arch/powerpc/lib/checksum_64.S
index ef96c6c58efc..ef96c6c58efc 100644
--- a/arch/ppc64/lib/checksum.S
+++ b/arch/powerpc/lib/checksum_64.S
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
new file mode 100644
index 000000000000..bee51414812e
--- /dev/null
+++ b/arch/powerpc/lib/copy_32.S
@@ -0,0 +1,543 @@
1/*
2 * Memory copy functions for 32-bit PowerPC.
3 *
4 * Copyright (C) 1996-2005 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/cache.h>
14#include <asm/errno.h>
15#include <asm/ppc_asm.h>
16
17#define COPY_16_BYTES \
18 lwz r7,4(r4); \
19 lwz r8,8(r4); \
20 lwz r9,12(r4); \
21 lwzu r10,16(r4); \
22 stw r7,4(r6); \
23 stw r8,8(r6); \
24 stw r9,12(r6); \
25 stwu r10,16(r6)
26
27#define COPY_16_BYTES_WITHEX(n) \
288 ## n ## 0: \
29 lwz r7,4(r4); \
308 ## n ## 1: \
31 lwz r8,8(r4); \
328 ## n ## 2: \
33 lwz r9,12(r4); \
348 ## n ## 3: \
35 lwzu r10,16(r4); \
368 ## n ## 4: \
37 stw r7,4(r6); \
388 ## n ## 5: \
39 stw r8,8(r6); \
408 ## n ## 6: \
41 stw r9,12(r6); \
428 ## n ## 7: \
43 stwu r10,16(r6)
44
45#define COPY_16_BYTES_EXCODE(n) \
469 ## n ## 0: \
47 addi r5,r5,-(16 * n); \
48 b 104f; \
499 ## n ## 1: \
50 addi r5,r5,-(16 * n); \
51 b 105f; \
52.section __ex_table,"a"; \
53 .align 2; \
54 .long 8 ## n ## 0b,9 ## n ## 0b; \
55 .long 8 ## n ## 1b,9 ## n ## 0b; \
56 .long 8 ## n ## 2b,9 ## n ## 0b; \
57 .long 8 ## n ## 3b,9 ## n ## 0b; \
58 .long 8 ## n ## 4b,9 ## n ## 1b; \
59 .long 8 ## n ## 5b,9 ## n ## 1b; \
60 .long 8 ## n ## 6b,9 ## n ## 1b; \
61 .long 8 ## n ## 7b,9 ## n ## 1b; \
62 .text
63
64 .text
65 .stabs "arch/powerpc/lib/",N_SO,0,0,0f
66 .stabs "copy32.S",N_SO,0,0,0f
670:
68
69CACHELINE_BYTES = L1_CACHE_BYTES
70LG_CACHELINE_BYTES = L1_CACHE_SHIFT
71CACHELINE_MASK = (L1_CACHE_BYTES-1)
72
73/*
74 * Use dcbz on the complete cache lines in the destination
75 * to set them to zero. This requires that the destination
76 * area is cacheable. -- paulus
77 */
78_GLOBAL(cacheable_memzero)
79 mr r5,r4
80 li r4,0
81 addi r6,r3,-4
82 cmplwi 0,r5,4
83 blt 7f
84 stwu r4,4(r6)
85 beqlr
86 andi. r0,r6,3
87 add r5,r0,r5
88 subf r6,r0,r6
89 clrlwi r7,r6,32-LG_CACHELINE_BYTES
90 add r8,r7,r5
91 srwi r9,r8,LG_CACHELINE_BYTES
92 addic. r9,r9,-1 /* total number of complete cachelines */
93 ble 2f
94 xori r0,r7,CACHELINE_MASK & ~3
95 srwi. r0,r0,2
96 beq 3f
97 mtctr r0
984: stwu r4,4(r6)
99 bdnz 4b
1003: mtctr r9
101 li r7,4
102#if !defined(CONFIG_8xx)
10310: dcbz r7,r6
104#else
10510: stw r4, 4(r6)
106 stw r4, 8(r6)
107 stw r4, 12(r6)
108 stw r4, 16(r6)
109#if CACHE_LINE_SIZE >= 32
110 stw r4, 20(r6)
111 stw r4, 24(r6)
112 stw r4, 28(r6)
113 stw r4, 32(r6)
114#endif /* CACHE_LINE_SIZE */
115#endif
116 addi r6,r6,CACHELINE_BYTES
117 bdnz 10b
118 clrlwi r5,r8,32-LG_CACHELINE_BYTES
119 addi r5,r5,4
1202: srwi r0,r5,2
121 mtctr r0
122 bdz 6f
1231: stwu r4,4(r6)
124 bdnz 1b
1256: andi. r5,r5,3
1267: cmpwi 0,r5,0
127 beqlr
128 mtctr r5
129 addi r6,r6,3
1308: stbu r4,1(r6)
131 bdnz 8b
132 blr
133
134_GLOBAL(memset)
135 rlwimi r4,r4,8,16,23
136 rlwimi r4,r4,16,0,15
137 addi r6,r3,-4
138 cmplwi 0,r5,4
139 blt 7f
140 stwu r4,4(r6)
141 beqlr
142 andi. r0,r6,3
143 add r5,r0,r5
144 subf r6,r0,r6
145 srwi r0,r5,2
146 mtctr r0
147 bdz 6f
1481: stwu r4,4(r6)
149 bdnz 1b
1506: andi. r5,r5,3
1517: cmpwi 0,r5,0
152 beqlr
153 mtctr r5
154 addi r6,r6,3
1558: stbu r4,1(r6)
156 bdnz 8b
157 blr
158
159/*
160 * This version uses dcbz on the complete cache lines in the
161 * destination area to reduce memory traffic. This requires that
162 * the destination area is cacheable.
163 * We only use this version if the source and dest don't overlap.
164 * -- paulus.
165 */
166_GLOBAL(cacheable_memcpy)
167 add r7,r3,r5 /* test if the src & dst overlap */
168 add r8,r4,r5
169 cmplw 0,r4,r7
170 cmplw 1,r3,r8
171 crand 0,0,4 /* cr0.lt &= cr1.lt */
172 blt memcpy /* if regions overlap */
173
174 addi r4,r4,-4
175 addi r6,r3,-4
176 neg r0,r3
177 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
178 beq 58f
179
180 cmplw 0,r5,r0 /* is this more than total to do? */
181 blt 63f /* if not much to do */
182 andi. r8,r0,3 /* get it word-aligned first */
183 subf r5,r0,r5
184 mtctr r8
185 beq+ 61f
18670: lbz r9,4(r4) /* do some bytes */
187 stb r9,4(r6)
188 addi r4,r4,1
189 addi r6,r6,1
190 bdnz 70b
19161: srwi. r0,r0,2
192 mtctr r0
193 beq 58f
19472: lwzu r9,4(r4) /* do some words */
195 stwu r9,4(r6)
196 bdnz 72b
197
19858: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
199 clrlwi r5,r5,32-LG_CACHELINE_BYTES
200 li r11,4
201 mtctr r0
202 beq 63f
20353:
204#if !defined(CONFIG_8xx)
205 dcbz r11,r6
206#endif
207 COPY_16_BYTES
208#if L1_CACHE_BYTES >= 32
209 COPY_16_BYTES
210#if L1_CACHE_BYTES >= 64
211 COPY_16_BYTES
212 COPY_16_BYTES
213#if L1_CACHE_BYTES >= 128
214 COPY_16_BYTES
215 COPY_16_BYTES
216 COPY_16_BYTES
217 COPY_16_BYTES
218#endif
219#endif
220#endif
221 bdnz 53b
222
22363: srwi. r0,r5,2
224 mtctr r0
225 beq 64f
22630: lwzu r0,4(r4)
227 stwu r0,4(r6)
228 bdnz 30b
229
23064: andi. r0,r5,3
231 mtctr r0
232 beq+ 65f
23340: lbz r0,4(r4)
234 stb r0,4(r6)
235 addi r4,r4,1
236 addi r6,r6,1
237 bdnz 40b
23865: blr
239
240_GLOBAL(memmove)
241 cmplw 0,r3,r4
242 bgt backwards_memcpy
243 /* fall through */
244
245_GLOBAL(memcpy)
246 srwi. r7,r5,3
247 addi r6,r3,-4
248 addi r4,r4,-4
249 beq 2f /* if less than 8 bytes to do */
250 andi. r0,r6,3 /* get dest word aligned */
251 mtctr r7
252 bne 5f
2531: lwz r7,4(r4)
254 lwzu r8,8(r4)
255 stw r7,4(r6)
256 stwu r8,8(r6)
257 bdnz 1b
258 andi. r5,r5,7
2592: cmplwi 0,r5,4
260 blt 3f
261 lwzu r0,4(r4)
262 addi r5,r5,-4
263 stwu r0,4(r6)
2643: cmpwi 0,r5,0
265 beqlr
266 mtctr r5
267 addi r4,r4,3
268 addi r6,r6,3
2694: lbzu r0,1(r4)
270 stbu r0,1(r6)
271 bdnz 4b
272 blr
2735: subfic r0,r0,4
274 mtctr r0
2756: lbz r7,4(r4)
276 addi r4,r4,1
277 stb r7,4(r6)
278 addi r6,r6,1
279 bdnz 6b
280 subf r5,r0,r5
281 rlwinm. r7,r5,32-3,3,31
282 beq 2b
283 mtctr r7
284 b 1b
285
286_GLOBAL(backwards_memcpy)
287 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
288 add r6,r3,r5
289 add r4,r4,r5
290 beq 2f
291 andi. r0,r6,3
292 mtctr r7
293 bne 5f
2941: lwz r7,-4(r4)
295 lwzu r8,-8(r4)
296 stw r7,-4(r6)
297 stwu r8,-8(r6)
298 bdnz 1b
299 andi. r5,r5,7
3002: cmplwi 0,r5,4
301 blt 3f
302 lwzu r0,-4(r4)
303 subi r5,r5,4
304 stwu r0,-4(r6)
3053: cmpwi 0,r5,0
306 beqlr
307 mtctr r5
3084: lbzu r0,-1(r4)
309 stbu r0,-1(r6)
310 bdnz 4b
311 blr
3125: mtctr r0
3136: lbzu r7,-1(r4)
314 stbu r7,-1(r6)
315 bdnz 6b
316 subf r5,r0,r5
317 rlwinm. r7,r5,32-3,3,31
318 beq 2b
319 mtctr r7
320 b 1b
321
322_GLOBAL(__copy_tofrom_user)
323 addi r4,r4,-4
324 addi r6,r3,-4
325 neg r0,r3
326 andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
327 beq 58f
328
329 cmplw 0,r5,r0 /* is this more than total to do? */
330 blt 63f /* if not much to do */
331 andi. r8,r0,3 /* get it word-aligned first */
332 mtctr r8
333 beq+ 61f
33470: lbz r9,4(r4) /* do some bytes */
33571: stb r9,4(r6)
336 addi r4,r4,1
337 addi r6,r6,1
338 bdnz 70b
33961: subf r5,r0,r5
340 srwi. r0,r0,2
341 mtctr r0
342 beq 58f
34372: lwzu r9,4(r4) /* do some words */
34473: stwu r9,4(r6)
345 bdnz 72b
346
347 .section __ex_table,"a"
348 .align 2
349 .long 70b,100f
350 .long 71b,101f
351 .long 72b,102f
352 .long 73b,103f
353 .text
354
35558: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
356 clrlwi r5,r5,32-LG_CACHELINE_BYTES
357 li r11,4
358 beq 63f
359
360#ifdef CONFIG_8xx
361 /* Don't use prefetch on 8xx */
362 mtctr r0
363 li r0,0
36453: COPY_16_BYTES_WITHEX(0)
365 bdnz 53b
366
367#else /* not CONFIG_8xx */
368 /* Here we decide how far ahead to prefetch the source */
369 li r3,4
370 cmpwi r0,1
371 li r7,0
372 ble 114f
373 li r7,1
374#if MAX_COPY_PREFETCH > 1
375 /* Heuristically, for large transfers we prefetch
376 MAX_COPY_PREFETCH cachelines ahead. For small transfers
377 we prefetch 1 cacheline ahead. */
378 cmpwi r0,MAX_COPY_PREFETCH
379 ble 112f
380 li r7,MAX_COPY_PREFETCH
381112: mtctr r7
382111: dcbt r3,r4
383 addi r3,r3,CACHELINE_BYTES
384 bdnz 111b
385#else
386 dcbt r3,r4
387 addi r3,r3,CACHELINE_BYTES
388#endif /* MAX_COPY_PREFETCH > 1 */
389
390114: subf r8,r7,r0
391 mr r0,r7
392 mtctr r8
393
39453: dcbt r3,r4
39554: dcbz r11,r6
396 .section __ex_table,"a"
397 .align 2
398 .long 54b,105f
399 .text
400/* the main body of the cacheline loop */
401 COPY_16_BYTES_WITHEX(0)
402#if L1_CACHE_BYTES >= 32
403 COPY_16_BYTES_WITHEX(1)
404#if L1_CACHE_BYTES >= 64
405 COPY_16_BYTES_WITHEX(2)
406 COPY_16_BYTES_WITHEX(3)
407#if L1_CACHE_BYTES >= 128
408 COPY_16_BYTES_WITHEX(4)
409 COPY_16_BYTES_WITHEX(5)
410 COPY_16_BYTES_WITHEX(6)
411 COPY_16_BYTES_WITHEX(7)
412#endif
413#endif
414#endif
415 bdnz 53b
416 cmpwi r0,0
417 li r3,4
418 li r7,0
419 bne 114b
420#endif /* CONFIG_8xx */
421
42263: srwi. r0,r5,2
423 mtctr r0
424 beq 64f
42530: lwzu r0,4(r4)
42631: stwu r0,4(r6)
427 bdnz 30b
428
42964: andi. r0,r5,3
430 mtctr r0
431 beq+ 65f
43240: lbz r0,4(r4)
43341: stb r0,4(r6)
434 addi r4,r4,1
435 addi r6,r6,1
436 bdnz 40b
43765: li r3,0
438 blr
439
440/* read fault, initial single-byte copy */
441100: li r9,0
442 b 90f
443/* write fault, initial single-byte copy */
444101: li r9,1
44590: subf r5,r8,r5
446 li r3,0
447 b 99f
448/* read fault, initial word copy */
449102: li r9,0
450 b 91f
451/* write fault, initial word copy */
452103: li r9,1
45391: li r3,2
454 b 99f
455
456/*
457 * this stuff handles faults in the cacheline loop and branches to either
458 * 104f (if in read part) or 105f (if in write part), after updating r5
459 */
460 COPY_16_BYTES_EXCODE(0)
461#if L1_CACHE_BYTES >= 32
462 COPY_16_BYTES_EXCODE(1)
463#if L1_CACHE_BYTES >= 64
464 COPY_16_BYTES_EXCODE(2)
465 COPY_16_BYTES_EXCODE(3)
466#if L1_CACHE_BYTES >= 128
467 COPY_16_BYTES_EXCODE(4)
468 COPY_16_BYTES_EXCODE(5)
469 COPY_16_BYTES_EXCODE(6)
470 COPY_16_BYTES_EXCODE(7)
471#endif
472#endif
473#endif
474
475/* read fault in cacheline loop */
476104: li r9,0
477 b 92f
478/* fault on dcbz (effectively a write fault) */
479/* or write fault in cacheline loop */
480105: li r9,1
48192: li r3,LG_CACHELINE_BYTES
482 mfctr r8
483 add r0,r0,r8
484 b 106f
485/* read fault in final word loop */
486108: li r9,0
487 b 93f
488/* write fault in final word loop */
489109: li r9,1
49093: andi. r5,r5,3
491 li r3,2
492 b 99f
493/* read fault in final byte loop */
494110: li r9,0
495 b 94f
496/* write fault in final byte loop */
497111: li r9,1
49894: li r5,0
499 li r3,0
500/*
501 * At this stage the number of bytes not copied is
502 * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
503 */
50499: mfctr r0
505106: slw r3,r0,r3
506 add. r3,r3,r5
507 beq 120f /* shouldn't happen */
508 cmpwi 0,r9,0
509 bne 120f
510/* for a read fault, first try to continue the copy one byte at a time */
511 mtctr r3
512130: lbz r0,4(r4)
513131: stb r0,4(r6)
514 addi r4,r4,1
515 addi r6,r6,1
516 bdnz 130b
517/* then clear out the destination: r3 bytes starting at 4(r6) */
518132: mfctr r3
519 srwi. r0,r3,2
520 li r9,0
521 mtctr r0
522 beq 113f
523112: stwu r9,4(r6)
524 bdnz 112b
525113: andi. r0,r3,3
526 mtctr r0
527 beq 120f
528114: stb r9,4(r6)
529 addi r6,r6,1
530 bdnz 114b
531120: blr
532
533 .section __ex_table,"a"
534 .align 2
535 .long 30b,108b
536 .long 31b,109b
537 .long 40b,110b
538 .long 41b,111b
539 .long 130b,132b
540 .long 131b,120b
541 .long 112b,120b
542 .long 114b,120b
543 .text
diff --git a/arch/ppc64/lib/copypage.S b/arch/powerpc/lib/copypage_64.S
index 733d61618bbf..733d61618bbf 100644
--- a/arch/ppc64/lib/copypage.S
+++ b/arch/powerpc/lib/copypage_64.S
diff --git a/arch/ppc64/lib/copyuser.S b/arch/powerpc/lib/copyuser_64.S
index a0b3fbbd6fb1..a0b3fbbd6fb1 100644
--- a/arch/ppc64/lib/copyuser.S
+++ b/arch/powerpc/lib/copyuser_64.S
diff --git a/arch/powerpc/lib/div64.S b/arch/powerpc/lib/div64.S
new file mode 100644
index 000000000000..83d9832fd919
--- /dev/null
+++ b/arch/powerpc/lib/div64.S
@@ -0,0 +1,59 @@
1/*
2 * Divide a 64-bit unsigned number by a 32-bit unsigned number.
3 * This routine assumes that the top 32 bits of the dividend are
4 * non-zero to start with.
5 * On entry, r3 points to the dividend, which get overwritten with
6 * the 64-bit quotient, and r4 contains the divisor.
7 * On exit, r3 contains the remainder.
8 *
9 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16#include <asm/ppc_asm.h>
17#include <asm/processor.h>
18
19_GLOBAL(__div64_32)
20 lwz r5,0(r3) # get the dividend into r5/r6
21 lwz r6,4(r3)
22 cmplw r5,r4
23 li r7,0
24 li r8,0
25 blt 1f
26 divwu r7,r5,r4 # if dividend.hi >= divisor,
27 mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor
28 subf. r5,r0,r5 # dividend.hi %= divisor
29 beq 3f
301: mr r11,r5 # here dividend.hi != 0
31 andis. r0,r5,0xc000
32 bne 2f
33 cntlzw r0,r5 # we are shifting the dividend right
34 li r10,-1 # to make it < 2^32, and shifting
35 srw r10,r10,r0 # the divisor right the same amount,
36 addc r9,r4,r10 # rounding up (so the estimate cannot
37 andc r11,r6,r10 # ever be too large, only too small)
38 andc r9,r9,r10
39 addze r9,r9
40 or r11,r5,r11
41 rotlw r9,r9,r0
42 rotlw r11,r11,r0
43 divwu r11,r11,r9 # then we divide the shifted quantities
442: mullw r10,r11,r4 # to get an estimate of the quotient,
45 mulhwu r9,r11,r4 # multiply the estimate by the divisor,
46 subfc r6,r10,r6 # take the product from the divisor,
47 add r8,r8,r11 # and add the estimate to the accumulated
48 subfe. r5,r9,r5 # quotient
49 bne 1b
503: cmplw r6,r4
51 blt 4f
52 divwu r0,r6,r4 # perform the remaining 32-bit division
53 mullw r10,r0,r4 # and get the remainder
54 add r8,r8,r0
55 subf r6,r10,r6
564: stw r7,0(r3) # return the quotient in *r3
57 stw r8,4(r3)
58 mr r3,r6 # return the remainder in r3
59 blr
diff --git a/arch/ppc64/lib/e2a.c b/arch/powerpc/lib/e2a.c
index d2b834887920..d2b834887920 100644
--- a/arch/ppc64/lib/e2a.c
+++ b/arch/powerpc/lib/e2a.c
diff --git a/arch/ppc64/lib/locks.c b/arch/powerpc/lib/locks.c
index 033643ab69e0..3794715b2972 100644
--- a/arch/ppc64/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -17,11 +17,12 @@
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/stringify.h> 19#include <linux/stringify.h>
20#include <asm/hvcall.h> 20#include <linux/smp.h>
21#include <asm/iSeries/HvCall.h>
22 21
23/* waiting for a spinlock... */ 22/* waiting for a spinlock... */
24#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 23#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
24#include <asm/hvcall.h>
25#include <asm/iSeries/HvCall.h>
25 26
26void __spin_yield(raw_spinlock_t *lock) 27void __spin_yield(raw_spinlock_t *lock)
27{ 28{
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
new file mode 100644
index 000000000000..68df20283ff5
--- /dev/null
+++ b/arch/powerpc/lib/mem_64.S
@@ -0,0 +1,119 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <asm/processor.h>
12#include <asm/errno.h>
13#include <asm/ppc_asm.h>
14
15_GLOBAL(memset)
16 neg r0,r3
17 rlwimi r4,r4,8,16,23
18 andi. r0,r0,7 /* # bytes to be 8-byte aligned */
19 rlwimi r4,r4,16,0,15
20 cmplw cr1,r5,r0 /* do we get that far? */
21 rldimi r4,r4,32,0
22 mtcrf 1,r0
23 mr r6,r3
24 blt cr1,8f
25 beq+ 3f /* if already 8-byte aligned */
26 subf r5,r0,r5
27 bf 31,1f
28 stb r4,0(r6)
29 addi r6,r6,1
301: bf 30,2f
31 sth r4,0(r6)
32 addi r6,r6,2
332: bf 29,3f
34 stw r4,0(r6)
35 addi r6,r6,4
363: srdi. r0,r5,6
37 clrldi r5,r5,58
38 mtctr r0
39 beq 5f
404: std r4,0(r6)
41 std r4,8(r6)
42 std r4,16(r6)
43 std r4,24(r6)
44 std r4,32(r6)
45 std r4,40(r6)
46 std r4,48(r6)
47 std r4,56(r6)
48 addi r6,r6,64
49 bdnz 4b
505: srwi. r0,r5,3
51 clrlwi r5,r5,29
52 mtcrf 1,r0
53 beq 8f
54 bf 29,6f
55 std r4,0(r6)
56 std r4,8(r6)
57 std r4,16(r6)
58 std r4,24(r6)
59 addi r6,r6,32
606: bf 30,7f
61 std r4,0(r6)
62 std r4,8(r6)
63 addi r6,r6,16
647: bf 31,8f
65 std r4,0(r6)
66 addi r6,r6,8
678: cmpwi r5,0
68 mtcrf 1,r5
69 beqlr+
70 bf 29,9f
71 stw r4,0(r6)
72 addi r6,r6,4
739: bf 30,10f
74 sth r4,0(r6)
75 addi r6,r6,2
7610: bflr 31
77 stb r4,0(r6)
78 blr
79
80_GLOBAL(memmove)
81 cmplw 0,r3,r4
82 bgt .backwards_memcpy
83 b .memcpy
84
85_GLOBAL(backwards_memcpy)
86 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
87 add r6,r3,r5
88 add r4,r4,r5
89 beq 2f
90 andi. r0,r6,3
91 mtctr r7
92 bne 5f
931: lwz r7,-4(r4)
94 lwzu r8,-8(r4)
95 stw r7,-4(r6)
96 stwu r8,-8(r6)
97 bdnz 1b
98 andi. r5,r5,7
992: cmplwi 0,r5,4
100 blt 3f
101 lwzu r0,-4(r4)
102 subi r5,r5,4
103 stwu r0,-4(r6)
1043: cmpwi 0,r5,0
105 beqlr
106 mtctr r5
1074: lbzu r0,-1(r4)
108 stbu r0,-1(r6)
109 bdnz 4b
110 blr
1115: mtctr r0
1126: lbzu r7,-1(r4)
113 stbu r7,-1(r6)
114 bdnz 6b
115 subf r5,r0,r5
116 rlwinm. r7,r5,32-3,3,31
117 beq 2b
118 mtctr r7
119 b 1b
diff --git a/arch/ppc64/lib/memcpy.S b/arch/powerpc/lib/memcpy_64.S
index 9ccacdf5bcb9..9ccacdf5bcb9 100644
--- a/arch/ppc64/lib/memcpy.S
+++ b/arch/powerpc/lib/memcpy_64.S
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
new file mode 100644
index 000000000000..42c5de2c898f
--- /dev/null
+++ b/arch/powerpc/lib/rheap.c
@@ -0,0 +1,693 @@
1/*
2 * arch/ppc/syslib/rheap.c
3 *
4 * A Remote Heap. Remote means that we don't touch the memory that the
5 * heap points to. Normal heap implementations use the memory they manage
6 * to place their list. We cannot do that because the memory we manage may
7 * have special properties, for example it is uncachable or of different
8 * endianess.
9 *
10 * Author: Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
13 * the terms of the GNU General Public License version 2. This program
14 * is licensed "as is" without any warranty of any kind, whether express
15 * or implied.
16 */
17#include <linux/types.h>
18#include <linux/errno.h>
19#include <linux/mm.h>
20#include <linux/slab.h>
21
22#include <asm/rheap.h>
23
24/*
25 * Fixup a list_head, needed when copying lists. If the pointers fall
26 * between s and e, apply the delta. This assumes that
27 * sizeof(struct list_head *) == sizeof(unsigned long *).
28 */
29static inline void fixup(unsigned long s, unsigned long e, int d,
30 struct list_head *l)
31{
32 unsigned long *pp;
33
34 pp = (unsigned long *)&l->next;
35 if (*pp >= s && *pp < e)
36 *pp += d;
37
38 pp = (unsigned long *)&l->prev;
39 if (*pp >= s && *pp < e)
40 *pp += d;
41}
42
43/* Grow the allocated blocks */
44static int grow(rh_info_t * info, int max_blocks)
45{
46 rh_block_t *block, *blk;
47 int i, new_blocks;
48 int delta;
49 unsigned long blks, blke;
50
51 if (max_blocks <= info->max_blocks)
52 return -EINVAL;
53
54 new_blocks = max_blocks - info->max_blocks;
55
56 block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL);
57 if (block == NULL)
58 return -ENOMEM;
59
60 if (info->max_blocks > 0) {
61
62 /* copy old block area */
63 memcpy(block, info->block,
64 sizeof(rh_block_t) * info->max_blocks);
65
66 delta = (char *)block - (char *)info->block;
67
68 /* and fixup list pointers */
69 blks = (unsigned long)info->block;
70 blke = (unsigned long)(info->block + info->max_blocks);
71
72 for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
73 fixup(blks, blke, delta, &blk->list);
74
75 fixup(blks, blke, delta, &info->empty_list);
76 fixup(blks, blke, delta, &info->free_list);
77 fixup(blks, blke, delta, &info->taken_list);
78
79 /* free the old allocated memory */
80 if ((info->flags & RHIF_STATIC_BLOCK) == 0)
81 kfree(info->block);
82 }
83
84 info->block = block;
85 info->empty_slots += new_blocks;
86 info->max_blocks = max_blocks;
87 info->flags &= ~RHIF_STATIC_BLOCK;
88
89 /* add all new blocks to the free list */
90 for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++)
91 list_add(&blk->list, &info->empty_list);
92
93 return 0;
94}
95
96/*
97 * Assure at least the required amount of empty slots. If this function
98 * causes a grow in the block area then all pointers kept to the block
99 * area are invalid!
100 */
101static int assure_empty(rh_info_t * info, int slots)
102{
103 int max_blocks;
104
105 /* This function is not meant to be used to grow uncontrollably */
106 if (slots >= 4)
107 return -EINVAL;
108
109 /* Enough space */
110 if (info->empty_slots >= slots)
111 return 0;
112
113 /* Next 16 sized block */
114 max_blocks = ((info->max_blocks + slots) + 15) & ~15;
115
116 return grow(info, max_blocks);
117}
118
119static rh_block_t *get_slot(rh_info_t * info)
120{
121 rh_block_t *blk;
122
123 /* If no more free slots, and failure to extend. */
124 /* XXX: You should have called assure_empty before */
125 if (info->empty_slots == 0) {
126 printk(KERN_ERR "rh: out of slots; crash is imminent.\n");
127 return NULL;
128 }
129
130 /* Get empty slot to use */
131 blk = list_entry(info->empty_list.next, rh_block_t, list);
132 list_del_init(&blk->list);
133 info->empty_slots--;
134
135 /* Initialize */
136 blk->start = NULL;
137 blk->size = 0;
138 blk->owner = NULL;
139
140 return blk;
141}
142
143static inline void release_slot(rh_info_t * info, rh_block_t * blk)
144{
145 list_add(&blk->list, &info->empty_list);
146 info->empty_slots++;
147}
148
149static void attach_free_block(rh_info_t * info, rh_block_t * blkn)
150{
151 rh_block_t *blk;
152 rh_block_t *before;
153 rh_block_t *after;
154 rh_block_t *next;
155 int size;
156 unsigned long s, e, bs, be;
157 struct list_head *l;
158
159 /* We assume that they are aligned properly */
160 size = blkn->size;
161 s = (unsigned long)blkn->start;
162 e = s + size;
163
164 /* Find the blocks immediately before and after the given one
165 * (if any) */
166 before = NULL;
167 after = NULL;
168 next = NULL;
169
170 list_for_each(l, &info->free_list) {
171 blk = list_entry(l, rh_block_t, list);
172
173 bs = (unsigned long)blk->start;
174 be = bs + blk->size;
175
176 if (next == NULL && s >= bs)
177 next = blk;
178
179 if (be == s)
180 before = blk;
181
182 if (e == bs)
183 after = blk;
184
185 /* If both are not null, break now */
186 if (before != NULL && after != NULL)
187 break;
188 }
189
190 /* Now check if they are really adjacent */
191 if (before != NULL && s != (unsigned long)before->start + before->size)
192 before = NULL;
193
194 if (after != NULL && e != (unsigned long)after->start)
195 after = NULL;
196
197 /* No coalescing; list insert and return */
198 if (before == NULL && after == NULL) {
199
200 if (next != NULL)
201 list_add(&blkn->list, &next->list);
202 else
203 list_add(&blkn->list, &info->free_list);
204
205 return;
206 }
207
208 /* We don't need it anymore */
209 release_slot(info, blkn);
210
211 /* Grow the before block */
212 if (before != NULL && after == NULL) {
213 before->size += size;
214 return;
215 }
216
217 /* Grow the after block backwards */
218 if (before == NULL && after != NULL) {
219 after->start = (int8_t *)after->start - size;
220 after->size += size;
221 return;
222 }
223
224 /* Grow the before block, and release the after block */
225 before->size += size + after->size;
226 list_del(&after->list);
227 release_slot(info, after);
228}
229
230static void attach_taken_block(rh_info_t * info, rh_block_t * blkn)
231{
232 rh_block_t *blk;
233 struct list_head *l;
234
235 /* Find the block immediately before the given one (if any) */
236 list_for_each(l, &info->taken_list) {
237 blk = list_entry(l, rh_block_t, list);
238 if (blk->start > blkn->start) {
239 list_add_tail(&blkn->list, &blk->list);
240 return;
241 }
242 }
243
244 list_add_tail(&blkn->list, &info->taken_list);
245}
246
247/*
248 * Create a remote heap dynamically. Note that no memory for the blocks
249 * are allocated. It will upon the first allocation
250 */
251rh_info_t *rh_create(unsigned int alignment)
252{
253 rh_info_t *info;
254
255 /* Alignment must be a power of two */
256 if ((alignment & (alignment - 1)) != 0)
257 return ERR_PTR(-EINVAL);
258
259 info = kmalloc(sizeof(*info), GFP_KERNEL);
260 if (info == NULL)
261 return ERR_PTR(-ENOMEM);
262
263 info->alignment = alignment;
264
265 /* Initially everything as empty */
266 info->block = NULL;
267 info->max_blocks = 0;
268 info->empty_slots = 0;
269 info->flags = 0;
270
271 INIT_LIST_HEAD(&info->empty_list);
272 INIT_LIST_HEAD(&info->free_list);
273 INIT_LIST_HEAD(&info->taken_list);
274
275 return info;
276}
277
278/*
279 * Destroy a dynamically created remote heap. Deallocate only if the areas
280 * are not static
281 */
282void rh_destroy(rh_info_t * info)
283{
284 if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
285 kfree(info->block);
286
287 if ((info->flags & RHIF_STATIC_INFO) == 0)
288 kfree(info);
289}
290
291/*
292 * Initialize in place a remote heap info block. This is needed to support
293 * operation very early in the startup of the kernel, when it is not yet safe
294 * to call kmalloc.
295 */
296void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
297 rh_block_t * block)
298{
299 int i;
300 rh_block_t *blk;
301
302 /* Alignment must be a power of two */
303 if ((alignment & (alignment - 1)) != 0)
304 return;
305
306 info->alignment = alignment;
307
308 /* Initially everything as empty */
309 info->block = block;
310 info->max_blocks = max_blocks;
311 info->empty_slots = max_blocks;
312 info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;
313
314 INIT_LIST_HEAD(&info->empty_list);
315 INIT_LIST_HEAD(&info->free_list);
316 INIT_LIST_HEAD(&info->taken_list);
317
318 /* Add all new blocks to the free list */
319 for (i = 0, blk = block; i < max_blocks; i++, blk++)
320 list_add(&blk->list, &info->empty_list);
321}
322
323/* Attach a free memory region, coalesces regions if adjuscent */
324int rh_attach_region(rh_info_t * info, void *start, int size)
325{
326 rh_block_t *blk;
327 unsigned long s, e, m;
328 int r;
329
330 /* The region must be aligned */
331 s = (unsigned long)start;
332 e = s + size;
333 m = info->alignment - 1;
334
335 /* Round start up */
336 s = (s + m) & ~m;
337
338 /* Round end down */
339 e = e & ~m;
340
341 /* Take final values */
342 start = (void *)s;
343 size = (int)(e - s);
344
345 /* Grow the blocks, if needed */
346 r = assure_empty(info, 1);
347 if (r < 0)
348 return r;
349
350 blk = get_slot(info);
351 blk->start = start;
352 blk->size = size;
353 blk->owner = NULL;
354
355 attach_free_block(info, blk);
356
357 return 0;
358}
359
360/* Detatch given address range, splits free block if needed. */
361void *rh_detach_region(rh_info_t * info, void *start, int size)
362{
363 struct list_head *l;
364 rh_block_t *blk, *newblk;
365 unsigned long s, e, m, bs, be;
366
367 /* Validate size */
368 if (size <= 0)
369 return ERR_PTR(-EINVAL);
370
371 /* The region must be aligned */
372 s = (unsigned long)start;
373 e = s + size;
374 m = info->alignment - 1;
375
376 /* Round start up */
377 s = (s + m) & ~m;
378
379 /* Round end down */
380 e = e & ~m;
381
382 if (assure_empty(info, 1) < 0)
383 return ERR_PTR(-ENOMEM);
384
385 blk = NULL;
386 list_for_each(l, &info->free_list) {
387 blk = list_entry(l, rh_block_t, list);
388 /* The range must lie entirely inside one free block */
389 bs = (unsigned long)blk->start;
390 be = (unsigned long)blk->start + blk->size;
391 if (s >= bs && e <= be)
392 break;
393 blk = NULL;
394 }
395
396 if (blk == NULL)
397 return ERR_PTR(-ENOMEM);
398
399 /* Perfect fit */
400 if (bs == s && be == e) {
401 /* Delete from free list, release slot */
402 list_del(&blk->list);
403 release_slot(info, blk);
404 return (void *)s;
405 }
406
407 /* blk still in free list, with updated start and/or size */
408 if (bs == s || be == e) {
409 if (bs == s)
410 blk->start = (int8_t *)blk->start + size;
411 blk->size -= size;
412
413 } else {
414 /* The front free fragment */
415 blk->size = s - bs;
416
417 /* the back free fragment */
418 newblk = get_slot(info);
419 newblk->start = (void *)e;
420 newblk->size = be - e;
421
422 list_add(&newblk->list, &blk->list);
423 }
424
425 return (void *)s;
426}
427
428void *rh_alloc(rh_info_t * info, int size, const char *owner)
429{
430 struct list_head *l;
431 rh_block_t *blk;
432 rh_block_t *newblk;
433 void *start;
434
435 /* Validate size */
436 if (size <= 0)
437 return ERR_PTR(-EINVAL);
438
439 /* Align to configured alignment */
440 size = (size + (info->alignment - 1)) & ~(info->alignment - 1);
441
442 if (assure_empty(info, 1) < 0)
443 return ERR_PTR(-ENOMEM);
444
445 blk = NULL;
446 list_for_each(l, &info->free_list) {
447 blk = list_entry(l, rh_block_t, list);
448 if (size <= blk->size)
449 break;
450 blk = NULL;
451 }
452
453 if (blk == NULL)
454 return ERR_PTR(-ENOMEM);
455
456 /* Just fits */
457 if (blk->size == size) {
458 /* Move from free list to taken list */
459 list_del(&blk->list);
460 blk->owner = owner;
461 start = blk->start;
462
463 attach_taken_block(info, blk);
464
465 return start;
466 }
467
468 newblk = get_slot(info);
469 newblk->start = blk->start;
470 newblk->size = size;
471 newblk->owner = owner;
472
473 /* blk still in free list, with updated start, size */
474 blk->start = (int8_t *)blk->start + size;
475 blk->size -= size;
476
477 start = newblk->start;
478
479 attach_taken_block(info, newblk);
480
481 return start;
482}
483
484/* allocate at precisely the given address */
485void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner)
486{
487 struct list_head *l;
488 rh_block_t *blk, *newblk1, *newblk2;
489 unsigned long s, e, m, bs, be;
490
491 /* Validate size */
492 if (size <= 0)
493 return ERR_PTR(-EINVAL);
494
495 /* The region must be aligned */
496 s = (unsigned long)start;
497 e = s + size;
498 m = info->alignment - 1;
499
500 /* Round start up */
501 s = (s + m) & ~m;
502
503 /* Round end down */
504 e = e & ~m;
505
506 if (assure_empty(info, 2) < 0)
507 return ERR_PTR(-ENOMEM);
508
509 blk = NULL;
510 list_for_each(l, &info->free_list) {
511 blk = list_entry(l, rh_block_t, list);
512 /* The range must lie entirely inside one free block */
513 bs = (unsigned long)blk->start;
514 be = (unsigned long)blk->start + blk->size;
515 if (s >= bs && e <= be)
516 break;
517 }
518
519 if (blk == NULL)
520 return ERR_PTR(-ENOMEM);
521
522 /* Perfect fit */
523 if (bs == s && be == e) {
524 /* Move from free list to taken list */
525 list_del(&blk->list);
526 blk->owner = owner;
527
528 start = blk->start;
529 attach_taken_block(info, blk);
530
531 return start;
532
533 }
534
535 /* blk still in free list, with updated start and/or size */
536 if (bs == s || be == e) {
537 if (bs == s)
538 blk->start = (int8_t *)blk->start + size;
539 blk->size -= size;
540
541 } else {
542 /* The front free fragment */
543 blk->size = s - bs;
544
545 /* The back free fragment */
546 newblk2 = get_slot(info);
547 newblk2->start = (void *)e;
548 newblk2->size = be - e;
549
550 list_add(&newblk2->list, &blk->list);
551 }
552
553 newblk1 = get_slot(info);
554 newblk1->start = (void *)s;
555 newblk1->size = e - s;
556 newblk1->owner = owner;
557
558 start = newblk1->start;
559 attach_taken_block(info, newblk1);
560
561 return start;
562}
563
564int rh_free(rh_info_t * info, void *start)
565{
566 rh_block_t *blk, *blk2;
567 struct list_head *l;
568 int size;
569
570 /* Linear search for block */
571 blk = NULL;
572 list_for_each(l, &info->taken_list) {
573 blk2 = list_entry(l, rh_block_t, list);
574 if (start < blk2->start)
575 break;
576 blk = blk2;
577 }
578
579 if (blk == NULL || start > (blk->start + blk->size))
580 return -EINVAL;
581
582 /* Remove from taken list */
583 list_del(&blk->list);
584
585 /* Get size of freed block */
586 size = blk->size;
587 attach_free_block(info, blk);
588
589 return size;
590}
591
592int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats)
593{
594 rh_block_t *blk;
595 struct list_head *l;
596 struct list_head *h;
597 int nr;
598
599 switch (what) {
600
601 case RHGS_FREE:
602 h = &info->free_list;
603 break;
604
605 case RHGS_TAKEN:
606 h = &info->taken_list;
607 break;
608
609 default:
610 return -EINVAL;
611 }
612
613 /* Linear search for block */
614 nr = 0;
615 list_for_each(l, h) {
616 blk = list_entry(l, rh_block_t, list);
617 if (stats != NULL && nr < max_stats) {
618 stats->start = blk->start;
619 stats->size = blk->size;
620 stats->owner = blk->owner;
621 stats++;
622 }
623 nr++;
624 }
625
626 return nr;
627}
628
629int rh_set_owner(rh_info_t * info, void *start, const char *owner)
630{
631 rh_block_t *blk, *blk2;
632 struct list_head *l;
633 int size;
634
635 /* Linear search for block */
636 blk = NULL;
637 list_for_each(l, &info->taken_list) {
638 blk2 = list_entry(l, rh_block_t, list);
639 if (start < blk2->start)
640 break;
641 blk = blk2;
642 }
643
644 if (blk == NULL || start > (blk->start + blk->size))
645 return -EINVAL;
646
647 blk->owner = owner;
648 size = blk->size;
649
650 return size;
651}
652
653void rh_dump(rh_info_t * info)
654{
655 static rh_stats_t st[32]; /* XXX maximum 32 blocks */
656 int maxnr;
657 int i, nr;
658
659 maxnr = sizeof(st) / sizeof(st[0]);
660
661 printk(KERN_INFO
662 "info @0x%p (%d slots empty / %d max)\n",
663 info, info->empty_slots, info->max_blocks);
664
665 printk(KERN_INFO " Free:\n");
666 nr = rh_get_stats(info, RHGS_FREE, maxnr, st);
667 if (nr > maxnr)
668 nr = maxnr;
669 for (i = 0; i < nr; i++)
670 printk(KERN_INFO
671 " 0x%p-0x%p (%u)\n",
672 st[i].start, (int8_t *) st[i].start + st[i].size,
673 st[i].size);
674 printk(KERN_INFO "\n");
675
676 printk(KERN_INFO " Taken:\n");
677 nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st);
678 if (nr > maxnr)
679 nr = maxnr;
680 for (i = 0; i < nr; i++)
681 printk(KERN_INFO
682 " 0x%p-0x%p (%u) %s\n",
683 st[i].start, (int8_t *) st[i].start + st[i].size,
684 st[i].size, st[i].owner != NULL ? st[i].owner : "");
685 printk(KERN_INFO "\n");
686}
687
688void rh_dump_blk(rh_info_t * info, rh_block_t * blk)
689{
690 printk(KERN_INFO
691 "blk @0x%p: 0x%p-0x%p (%u)\n",
692 blk, blk->start, (int8_t *) blk->start + blk->size, blk->size);
693}
diff --git a/arch/ppc64/lib/sstep.c b/arch/powerpc/lib/sstep.c
index e79123d1485c..666c2aa55016 100644
--- a/arch/ppc64/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -10,13 +10,18 @@
10 */ 10 */
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/ptrace.h> 12#include <linux/ptrace.h>
13#include <linux/config.h>
13#include <asm/sstep.h> 14#include <asm/sstep.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15 16
16extern char system_call_common[]; 17extern char system_call_common[];
17 18
19#ifdef CONFIG_PPC64
18/* Bits in SRR1 that are copied from MSR */ 20/* Bits in SRR1 that are copied from MSR */
19#define MSR_MASK 0xffffffff87c0ffff 21#define MSR_MASK 0xffffffff87c0ffff
22#else
23#define MSR_MASK 0x87c0ffff
24#endif
20 25
21/* 26/*
22 * Determine whether a conditional branch instruction would branch. 27 * Determine whether a conditional branch instruction would branch.
@@ -66,6 +71,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
66 if (branch_taken(instr, regs)) 71 if (branch_taken(instr, regs))
67 regs->nip = imm; 72 regs->nip = imm;
68 return 1; 73 return 1;
74#ifdef CONFIG_PPC64
69 case 17: /* sc */ 75 case 17: /* sc */
70 /* 76 /*
71 * N.B. this uses knowledge about how the syscall 77 * N.B. this uses knowledge about how the syscall
@@ -79,6 +85,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
79 regs->nip = (unsigned long) &system_call_common; 85 regs->nip = (unsigned long) &system_call_common;
80 regs->msr = MSR_KERNEL; 86 regs->msr = MSR_KERNEL;
81 return 1; 87 return 1;
88#endif
82 case 18: /* b */ 89 case 18: /* b */
83 imm = instr & 0x03fffffc; 90 imm = instr & 0x03fffffc;
84 if (imm & 0x02000000) 91 if (imm & 0x02000000)
@@ -121,6 +128,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
121 if ((regs->msr & MSR_SF) == 0) 128 if ((regs->msr & MSR_SF) == 0)
122 regs->nip &= 0xffffffffUL; 129 regs->nip &= 0xffffffffUL;
123 return 1; 130 return 1;
131 case 0x124: /* mtmsr */
132 imm = regs->gpr[rd];
133 if ((imm & MSR_RI) == 0)
134 /* can't step mtmsr that would clear MSR_RI */
135 return -1;
136 regs->msr = imm;
137 regs->nip += 4;
138 return 1;
139#ifdef CONFIG_PPC64
124 case 0x164: /* mtmsrd */ 140 case 0x164: /* mtmsrd */
125 /* only MSR_EE and MSR_RI get changed if bit 15 set */ 141 /* only MSR_EE and MSR_RI get changed if bit 15 set */
126 /* mtmsrd doesn't change MSR_HV and MSR_ME */ 142 /* mtmsrd doesn't change MSR_HV and MSR_ME */
@@ -135,6 +151,7 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
135 if ((imm & MSR_SF) == 0) 151 if ((imm & MSR_SF) == 0)
136 regs->nip &= 0xffffffffUL; 152 regs->nip &= 0xffffffffUL;
137 return 1; 153 return 1;
154#endif
138 } 155 }
139 } 156 }
140 return 0; 157 return 0;
diff --git a/arch/ppc64/lib/strcase.c b/arch/powerpc/lib/strcase.c
index e84f243368c0..36b521091bbc 100644
--- a/arch/ppc64/lib/strcase.c
+++ b/arch/powerpc/lib/strcase.c
@@ -1,11 +1,3 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/ctype.h> 1#include <linux/ctype.h>
10 2
11int strcasecmp(const char *s1, const char *s2) 3int strcasecmp(const char *s1, const char *s2)
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
new file mode 100644
index 000000000000..b9ca84ed8927
--- /dev/null
+++ b/arch/powerpc/lib/string.S
@@ -0,0 +1,198 @@
1/*
2 * String handling functions for PowerPC.
3 *
4 * Copyright (C) 1996 Paul Mackerras.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/config.h>
12#include <asm/processor.h>
13#include <asm/errno.h>
14#include <asm/ppc_asm.h>
15
16 .section __ex_table,"a"
17#ifdef CONFIG_PPC64
18 .align 3
19#define EXTBL .llong
20#else
21 .align 2
22#define EXTBL .long
23#endif
24 .text
25
26_GLOBAL(strcpy)
27 addi r5,r3,-1
28 addi r4,r4,-1
291: lbzu r0,1(r4)
30 cmpwi 0,r0,0
31 stbu r0,1(r5)
32 bne 1b
33 blr
34
35/* This clears out any unused part of the destination buffer,
36 just as the libc version does. -- paulus */
37_GLOBAL(strncpy)
38 cmpwi 0,r5,0
39 beqlr
40 mtctr r5
41 addi r6,r3,-1
42 addi r4,r4,-1
431: lbzu r0,1(r4)
44 cmpwi 0,r0,0
45 stbu r0,1(r6)
46 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
47 bnelr /* if we didn't hit a null char, we're done */
48 mfctr r5
49 cmpwi 0,r5,0 /* any space left in destination buffer? */
50 beqlr /* we know r0 == 0 here */
512: stbu r0,1(r6) /* clear it out if so */
52 bdnz 2b
53 blr
54
55_GLOBAL(strcat)
56 addi r5,r3,-1
57 addi r4,r4,-1
581: lbzu r0,1(r5)
59 cmpwi 0,r0,0
60 bne 1b
61 addi r5,r5,-1
621: lbzu r0,1(r4)
63 cmpwi 0,r0,0
64 stbu r0,1(r5)
65 bne 1b
66 blr
67
68_GLOBAL(strcmp)
69 addi r5,r3,-1
70 addi r4,r4,-1
711: lbzu r3,1(r5)
72 cmpwi 1,r3,0
73 lbzu r0,1(r4)
74 subf. r3,r0,r3
75 beqlr 1
76 beq 1b
77 blr
78
79_GLOBAL(strlen)
80 addi r4,r3,-1
811: lbzu r0,1(r4)
82 cmpwi 0,r0,0
83 bne 1b
84 subf r3,r3,r4
85 blr
86
87_GLOBAL(memcmp)
88 cmpwi 0,r5,0
89 ble- 2f
90 mtctr r5
91 addi r6,r3,-1
92 addi r4,r4,-1
931: lbzu r3,1(r6)
94 lbzu r0,1(r4)
95 subf. r3,r0,r3
96 bdnzt 2,1b
97 blr
982: li r3,0
99 blr
100
101_GLOBAL(memchr)
102 cmpwi 0,r5,0
103 ble- 2f
104 mtctr r5
105 addi r3,r3,-1
1061: lbzu r0,1(r3)
107 cmpw 0,r0,r4
108 bdnzf 2,1b
109 beqlr
1102: li r3,0
111 blr
112
113_GLOBAL(__clear_user)
114 addi r6,r3,-4
115 li r3,0
116 li r5,0
117 cmplwi 0,r4,4
118 blt 7f
119 /* clear a single word */
12011: stwu r5,4(r6)
121 beqlr
122 /* clear word sized chunks */
123 andi. r0,r6,3
124 add r4,r0,r4
125 subf r6,r0,r6
126 srwi r0,r4,2
127 andi. r4,r4,3
128 mtctr r0
129 bdz 7f
1301: stwu r5,4(r6)
131 bdnz 1b
132 /* clear byte sized chunks */
1337: cmpwi 0,r4,0
134 beqlr
135 mtctr r4
136 addi r6,r6,3
1378: stbu r5,1(r6)
138 bdnz 8b
139 blr
14090: mr r3,r4
141 blr
14291: mfctr r3
143 slwi r3,r3,2
144 add r3,r3,r4
145 blr
14692: mfctr r3
147 blr
148
149 .section __ex_table,"a"
150 EXTBL 11b,90b
151 EXTBL 1b,91b
152 EXTBL 8b,92b
153 .text
154
155_GLOBAL(__strncpy_from_user)
156 addi r6,r3,-1
157 addi r4,r4,-1
158 cmpwi 0,r5,0
159 beq 2f
160 mtctr r5
1611: lbzu r0,1(r4)
162 cmpwi 0,r0,0
163 stbu r0,1(r6)
164 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
165 beq 3f
1662: addi r6,r6,1
1673: subf r3,r3,r6
168 blr
16999: li r3,-EFAULT
170 blr
171
172 .section __ex_table,"a"
173 EXTBL 1b,99b
174 .text
175
176/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
177_GLOBAL(__strnlen_user)
178 addi r7,r3,-1
179 subf r6,r7,r5 /* top+1 - str */
180 cmplw 0,r4,r6
181 bge 0f
182 mr r6,r4
1830: mtctr r6 /* ctr = min(len, top - str) */
1841: lbzu r0,1(r7) /* get next byte */
185 cmpwi 0,r0,0
186 bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
187 addi r7,r7,1
188 subf r3,r3,r7 /* number of bytes we have looked at */
189 beqlr /* return if we found a 0 byte */
190 cmpw 0,r3,r4 /* did we look at all len bytes? */
191 blt 99f /* if not, must have hit top */
192 addi r3,r4,1 /* return len + 1 to indicate no null found */
193 blr
19499: li r3,0 /* bad address, return 0 */
195 blr
196
197 .section __ex_table,"a"
198 EXTBL 1b,99b
diff --git a/arch/ppc64/lib/usercopy.c b/arch/powerpc/lib/usercopy_64.c
index 5eea6f3c1e03..5eea6f3c1e03 100644
--- a/arch/ppc64/lib/usercopy.c
+++ b/arch/powerpc/lib/usercopy_64.c
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
new file mode 100644
index 000000000000..3d79ce281b67
--- /dev/null
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -0,0 +1,120 @@
1/*
2 * Modifications by Matt Porter (mporter@mvista.com) to support
3 * PPC44x Book E processors.
4 *
5 * This file contains the routines for initializing the MMU
6 * on the 4xx series of chips.
7 * -- paulus
8 *
9 * Derived from arch/ppc/mm/init.c:
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 *
12 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
13 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
14 * Copyright (C) 1996 Paul Mackerras
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
16 *
17 * Derived from "arch/i386/mm/init.c"
18 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 *
25 */
26
27#include <linux/config.h>
28#include <linux/signal.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/types.h>
34#include <linux/ptrace.h>
35#include <linux/mman.h>
36#include <linux/mm.h>
37#include <linux/swap.h>
38#include <linux/stddef.h>
39#include <linux/vmalloc.h>
40#include <linux/init.h>
41#include <linux/delay.h>
42#include <linux/highmem.h>
43
44#include <asm/pgalloc.h>
45#include <asm/prom.h>
46#include <asm/io.h>
47#include <asm/mmu_context.h>
48#include <asm/pgtable.h>
49#include <asm/mmu.h>
50#include <asm/uaccess.h>
51#include <asm/smp.h>
52#include <asm/bootx.h>
53#include <asm/machdep.h>
54#include <asm/setup.h>
55
56#include "mmu_decl.h"
57
58extern char etext[], _stext[];
59
60/* Used by the 44x TLB replacement exception handler.
61 * Just needed it declared someplace.
62 */
63unsigned int tlb_44x_index = 0;
64unsigned int tlb_44x_hwater = 62;
65
66/*
67 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
68 */
69static void __init
70ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
71{
72 unsigned long attrib = 0;
73
74 __asm__ __volatile__("\
75 clrrwi %2,%2,10\n\
76 ori %2,%2,%4\n\
77 clrrwi %1,%1,10\n\
78 li %0,0\n\
79 ori %0,%0,%5\n\
80 tlbwe %2,%3,%6\n\
81 tlbwe %1,%3,%7\n\
82 tlbwe %0,%3,%8"
83 :
84 : "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
85 "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
86 "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
87 "i" (PPC44x_TLB_PAGEID),
88 "i" (PPC44x_TLB_XLAT),
89 "i" (PPC44x_TLB_ATTRIB));
90}
91
92/*
93 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
94 */
95void __init MMU_init_hw(void)
96{
97 flush_instruction_cache();
98}
99
100unsigned long __init mmu_mapin_ram(void)
101{
102 unsigned int pinned_tlbs = 1;
103 int i;
104
105 /* Determine number of entries necessary to cover lowmem */
106 pinned_tlbs = (unsigned int)
107 (_ALIGN(total_lowmem, PPC44x_PIN_SIZE) >> PPC44x_PIN_SHIFT);
108
109 /* Write upper watermark to save location */
110 tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
111
112 /* If necessary, set additional pinned TLBs */
113 if (pinned_tlbs > 1)
114 for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
115 unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC44x_PIN_SIZE;
116 ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
117 }
118
119 return total_lowmem;
120}
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
new file mode 100644
index 000000000000..b7bcbc232f39
--- /dev/null
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -0,0 +1,141 @@
1/*
2 * This file contains the routines for initializing the MMU
3 * on the 4xx series of chips.
4 * -- paulus
5 *
6 * Derived from arch/ppc/mm/init.c:
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
10 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
11 * Copyright (C) 1996 Paul Mackerras
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13 *
14 * Derived from "arch/i386/mm/init.c"
15 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24#include <linux/config.h>
25#include <linux/signal.h>
26#include <linux/sched.h>
27#include <linux/kernel.h>
28#include <linux/errno.h>
29#include <linux/string.h>
30#include <linux/types.h>
31#include <linux/ptrace.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/highmem.h>
40
41#include <asm/pgalloc.h>
42#include <asm/prom.h>
43#include <asm/io.h>
44#include <asm/mmu_context.h>
45#include <asm/pgtable.h>
46#include <asm/mmu.h>
47#include <asm/uaccess.h>
48#include <asm/smp.h>
49#include <asm/bootx.h>
50#include <asm/machdep.h>
51#include <asm/setup.h>
52#include "mmu_decl.h"
53
54extern int __map_without_ltlbs;
55/*
56 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
57 */
58void __init MMU_init_hw(void)
59{
60 /*
61 * The Zone Protection Register (ZPR) defines how protection will
62 * be applied to every page which is a member of a given zone. At
63 * present, we utilize only two of the 4xx's zones.
64 * The zone index bits (of ZSEL) in the PTE are used for software
65 * indicators, except the LSB. For user access, zone 1 is used,
66 * for kernel access, zone 0 is used. We set all but zone 1
67 * to zero, allowing only kernel access as indicated in the PTE.
68 * For zone 1, we set a 01 binary (a value of 10 will not work)
69 * to allow user access as indicated in the PTE. This also allows
70 * kernel access as indicated in the PTE.
71 */
72
73 mtspr(SPRN_ZPR, 0x10000000);
74
75 flush_instruction_cache();
76
77 /*
78 * Set up the real-mode cache parameters for the exception vector
79 * handlers (which are run in real-mode).
80 */
81
82 mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */
83
84 /*
85 * Cache instruction and data space where the exception
86 * vectors and the kernel live in real-mode.
87 */
88
89 mtspr(SPRN_DCCR, 0xF0000000); /* 512 MB of data space at 0x0. */
90 mtspr(SPRN_ICCR, 0xF0000000); /* 512 MB of instr. space at 0x0. */
91}
92
93#define LARGE_PAGE_SIZE_16M (1<<24)
94#define LARGE_PAGE_SIZE_4M (1<<22)
95
96unsigned long __init mmu_mapin_ram(void)
97{
98 unsigned long v, s;
99 phys_addr_t p;
100
101 v = KERNELBASE;
102 p = PPC_MEMSTART;
103 s = 0;
104
105 if (__map_without_ltlbs) {
106 return s;
107 }
108
109 while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
110 pmd_t *pmdp;
111 unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
112
113 spin_lock(&init_mm.page_table_lock);
114 pmdp = pmd_offset(pgd_offset_k(v), v);
115 pmd_val(*pmdp++) = val;
116 pmd_val(*pmdp++) = val;
117 pmd_val(*pmdp++) = val;
118 pmd_val(*pmdp++) = val;
119 spin_unlock(&init_mm.page_table_lock);
120
121 v += LARGE_PAGE_SIZE_16M;
122 p += LARGE_PAGE_SIZE_16M;
123 s += LARGE_PAGE_SIZE_16M;
124 }
125
126 while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
127 pmd_t *pmdp;
128 unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
129
130 spin_lock(&init_mm.page_table_lock);
131 pmdp = pmd_offset(pgd_offset_k(v), v);
132 pmd_val(*pmdp) = val;
133 spin_unlock(&init_mm.page_table_lock);
134
135 v += LARGE_PAGE_SIZE_4M;
136 p += LARGE_PAGE_SIZE_4M;
137 s += LARGE_PAGE_SIZE_4M;
138 }
139
140 return s;
141}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
new file mode 100644
index 000000000000..93441e7a2921
--- /dev/null
+++ b/arch/powerpc/mm/Makefile
@@ -0,0 +1,21 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5ifeq ($(CONFIG_PPC64),y)
6EXTRA_CFLAGS += -mno-minimal-toc
7endif
8
9obj-y := fault.o mem.o lmb.o
10obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
11hash-$(CONFIG_PPC_MULTIPLATFORM) := hash_native_64.o
12obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o \
13 hash_utils_64.o hash_low_64.o tlb_64.o \
14 slb_low.o slb.o stab.o mmap.o imalloc.o \
15 $(hash-y)
16obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o tlb_32.o
17obj-$(CONFIG_40x) += 4xx_mmu.o
18obj-$(CONFIG_44x) += 44x_mmu.o
19obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
20obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
21obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/ppc64/mm/fault.c b/arch/powerpc/mm/fault.c
index be3f25cf3e9f..841d8b6323a8 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * arch/ppc/mm/fault.c 2 * arch/ppc/mm/fault.c
3 * 3 *
4 * PowerPC version 4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * 6 *
7 * Derived from "arch/i386/mm/fault.c" 7 * Derived from "arch/i386/mm/fault.c"
@@ -24,10 +24,11 @@
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/string.h> 25#include <linux/string.h>
26#include <linux/types.h> 26#include <linux/types.h>
27#include <linux/ptrace.h>
27#include <linux/mman.h> 28#include <linux/mman.h>
28#include <linux/mm.h> 29#include <linux/mm.h>
29#include <linux/interrupt.h> 30#include <linux/interrupt.h>
30#include <linux/smp_lock.h> 31#include <linux/highmem.h>
31#include <linux/module.h> 32#include <linux/module.h>
32#include <linux/kprobes.h> 33#include <linux/kprobes.h>
33 34
@@ -37,6 +38,7 @@
37#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
38#include <asm/system.h> 39#include <asm/system.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <asm/tlbflush.h>
40#include <asm/kdebug.h> 42#include <asm/kdebug.h>
41#include <asm/siginfo.h> 43#include <asm/siginfo.h>
42 44
@@ -78,6 +80,7 @@ static int store_updates_sp(struct pt_regs *regs)
78 return 0; 80 return 0;
79} 81}
80 82
83#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
81static void do_dabr(struct pt_regs *regs, unsigned long error_code) 84static void do_dabr(struct pt_regs *regs, unsigned long error_code)
82{ 85{
83 siginfo_t info; 86 siginfo_t info;
@@ -99,12 +102,18 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
99 info.si_addr = (void __user *)regs->nip; 102 info.si_addr = (void __user *)regs->nip;
100 force_sig_info(SIGTRAP, &info, current); 103 force_sig_info(SIGTRAP, &info, current);
101} 104}
105#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
102 106
103/* 107/*
104 * The error_code parameter is 108 * For 600- and 800-family processors, the error_code parameter is DSISR
109 * for a data fault, SRR1 for an instruction fault. For 400-family processors
110 * the error_code parameter is ESR for a data fault, 0 for an instruction
111 * fault.
112 * For 64-bit processors, the error_code parameter is
105 * - DSISR for a non-SLB data access fault, 113 * - DSISR for a non-SLB data access fault,
106 * - SRR1 & 0x08000000 for a non-SLB instruction access fault 114 * - SRR1 & 0x08000000 for a non-SLB instruction access fault
107 * - 0 any SLB fault. 115 * - 0 any SLB fault.
116 *
108 * The return value is 0 if the fault was handled, or the signal 117 * The return value is 0 if the fault was handled, or the signal
109 * number if this is a kernel fault that can't be handled here. 118 * number if this is a kernel fault that can't be handled here.
110 */ 119 */
@@ -114,12 +123,25 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
114 struct vm_area_struct * vma; 123 struct vm_area_struct * vma;
115 struct mm_struct *mm = current->mm; 124 struct mm_struct *mm = current->mm;
116 siginfo_t info; 125 siginfo_t info;
117 unsigned long code = SEGV_MAPERR; 126 int code = SEGV_MAPERR;
118 unsigned long is_write = error_code & DSISR_ISSTORE; 127 int is_write = 0;
119 unsigned long trap = TRAP(regs); 128 int trap = TRAP(regs);
120 unsigned long is_exec = trap == 0x400; 129 int is_exec = trap == 0x400;
121 130
122 BUG_ON((trap == 0x380) || (trap == 0x480)); 131#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
132 /*
133 * Fortunately the bit assignments in SRR1 for an instruction
134 * fault and DSISR for a data fault are mostly the same for the
135 * bits we are interested in. But there are some bits which
136 * indicate errors in DSISR but can validly be set in SRR1.
137 */
138 if (trap == 0x400)
139 error_code &= 0x48200000;
140 else
141 is_write = error_code & DSISR_ISSTORE;
142#else
143 is_write = error_code & ESR_DST;
144#endif /* CONFIG_4xx || CONFIG_BOOKE */
123 145
124 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code, 146 if (notify_die(DIE_PAGE_FAULT, "page_fault", regs, error_code,
125 11, SIGSEGV) == NOTIFY_STOP) 147 11, SIGSEGV) == NOTIFY_STOP)
@@ -134,10 +156,13 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
134 if (!user_mode(regs) && (address >= TASK_SIZE)) 156 if (!user_mode(regs) && (address >= TASK_SIZE))
135 return SIGSEGV; 157 return SIGSEGV;
136 158
159#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
137 if (error_code & DSISR_DABRMATCH) { 160 if (error_code & DSISR_DABRMATCH) {
161 /* DABR match */
138 do_dabr(regs, error_code); 162 do_dabr(regs, error_code);
139 return 0; 163 return 0;
140 } 164 }
165#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
141 166
142 if (in_atomic() || mm == NULL) { 167 if (in_atomic() || mm == NULL) {
143 if (!user_mode(regs)) 168 if (!user_mode(regs))
@@ -176,10 +201,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
176 vma = find_vma(mm, address); 201 vma = find_vma(mm, address);
177 if (!vma) 202 if (!vma)
178 goto bad_area; 203 goto bad_area;
179 204 if (vma->vm_start <= address)
180 if (vma->vm_start <= address) {
181 goto good_area; 205 goto good_area;
182 }
183 if (!(vma->vm_flags & VM_GROWSDOWN)) 206 if (!(vma->vm_flags & VM_GROWSDOWN))
184 goto bad_area; 207 goto bad_area;
185 208
@@ -214,35 +237,76 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
214 && (!user_mode(regs) || !store_updates_sp(regs))) 237 && (!user_mode(regs) || !store_updates_sp(regs)))
215 goto bad_area; 238 goto bad_area;
216 } 239 }
217
218 if (expand_stack(vma, address)) 240 if (expand_stack(vma, address))
219 goto bad_area; 241 goto bad_area;
220 242
221good_area: 243good_area:
222 code = SEGV_ACCERR; 244 code = SEGV_ACCERR;
245#if defined(CONFIG_6xx)
246 if (error_code & 0x95700000)
247 /* an error such as lwarx to I/O controller space,
248 address matching DABR, eciwx, etc. */
249 goto bad_area;
250#endif /* CONFIG_6xx */
251#if defined(CONFIG_8xx)
252 /* The MPC8xx seems to always set 0x80000000, which is
253 * "undefined". Of those that can be set, this is the only
254 * one which seems bad.
255 */
256 if (error_code & 0x10000000)
257 /* Guarded storage error. */
258 goto bad_area;
259#endif /* CONFIG_8xx */
223 260
224 if (is_exec) { 261 if (is_exec) {
262#ifdef CONFIG_PPC64
225 /* protection fault */ 263 /* protection fault */
226 if (error_code & DSISR_PROTFAULT) 264 if (error_code & DSISR_PROTFAULT)
227 goto bad_area; 265 goto bad_area;
228 if (!(vma->vm_flags & VM_EXEC)) 266 if (!(vma->vm_flags & VM_EXEC))
229 goto bad_area; 267 goto bad_area;
268#endif
269#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
270 pte_t *ptep;
271
272 /* Since 4xx/Book-E supports per-page execute permission,
273 * we lazily flush dcache to icache. */
274 ptep = NULL;
275 if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
276 struct page *page = pte_page(*ptep);
277
278 if (! test_bit(PG_arch_1, &page->flags)) {
279 flush_dcache_icache_page(page);
280 set_bit(PG_arch_1, &page->flags);
281 }
282 pte_update(ptep, 0, _PAGE_HWEXEC);
283 _tlbie(address);
284 pte_unmap(ptep);
285 up_read(&mm->mmap_sem);
286 return 0;
287 }
288 if (ptep != NULL)
289 pte_unmap(ptep);
290#endif
230 /* a write */ 291 /* a write */
231 } else if (is_write) { 292 } else if (is_write) {
232 if (!(vma->vm_flags & VM_WRITE)) 293 if (!(vma->vm_flags & VM_WRITE))
233 goto bad_area; 294 goto bad_area;
234 /* a read */ 295 /* a read */
235 } else { 296 } else {
236 if (!(vma->vm_flags & VM_READ)) 297 /* protection fault */
298 if (error_code & 0x08000000)
299 goto bad_area;
300 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
237 goto bad_area; 301 goto bad_area;
238 } 302 }
239 303
240 survive:
241 /* 304 /*
242 * If for any reason at all we couldn't handle the fault, 305 * If for any reason at all we couldn't handle the fault,
243 * make sure we exit gracefully rather than endlessly redo 306 * make sure we exit gracefully rather than endlessly redo
244 * the fault. 307 * the fault.
245 */ 308 */
309 survive:
246 switch (handle_mm_fault(mm, vma, address, is_write)) { 310 switch (handle_mm_fault(mm, vma, address, is_write)) {
247 311
248 case VM_FAULT_MINOR: 312 case VM_FAULT_MINOR:
@@ -268,15 +332,11 @@ bad_area:
268bad_area_nosemaphore: 332bad_area_nosemaphore:
269 /* User mode accesses cause a SIGSEGV */ 333 /* User mode accesses cause a SIGSEGV */
270 if (user_mode(regs)) { 334 if (user_mode(regs)) {
271 info.si_signo = SIGSEGV; 335 _exception(SIGSEGV, regs, code, address);
272 info.si_errno = 0;
273 info.si_code = code;
274 info.si_addr = (void __user *) address;
275 force_sig_info(SIGSEGV, &info, current);
276 return 0; 336 return 0;
277 } 337 }
278 338
279 if (trap == 0x400 && (error_code & DSISR_PROTFAULT) 339 if (is_exec && (error_code & DSISR_PROTFAULT)
280 && printk_ratelimit()) 340 && printk_ratelimit())
281 printk(KERN_CRIT "kernel tried to execute NX-protected" 341 printk(KERN_CRIT "kernel tried to execute NX-protected"
282 " page (%lx) - exploit attempt? (uid: %d)\n", 342 " page (%lx) - exploit attempt? (uid: %d)\n",
@@ -315,8 +375,8 @@ do_sigbus:
315 375
316/* 376/*
317 * bad_page_fault is called when we have a bad access from the kernel. 377 * bad_page_fault is called when we have a bad access from the kernel.
318 * It is called from do_page_fault above and from some of the procedures 378 * It is called from the DSI and ISI handlers in head.S and from some
319 * in traps.c. 379 * of the procedures in traps.c.
320 */ 380 */
321void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 381void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
322{ 382{
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
new file mode 100644
index 000000000000..af9ca0eb6d55
--- /dev/null
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -0,0 +1,237 @@
1/*
2 * Modifications by Kumar Gala (kumar.gala@freescale.com) to support
3 * E500 Book E processors.
4 *
5 * Copyright 2004 Freescale Semiconductor, Inc
6 *
7 * This file contains the routines for initializing the MMU
8 * on the 4xx series of chips.
9 * -- paulus
10 *
11 * Derived from arch/ppc/mm/init.c:
12 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
13 *
14 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
15 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
16 * Copyright (C) 1996 Paul Mackerras
17 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 *
19 * Derived from "arch/i386/mm/init.c"
20 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 */
28
29#include <linux/config.h>
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/string.h>
35#include <linux/types.h>
36#include <linux/ptrace.h>
37#include <linux/mman.h>
38#include <linux/mm.h>
39#include <linux/swap.h>
40#include <linux/stddef.h>
41#include <linux/vmalloc.h>
42#include <linux/init.h>
43#include <linux/delay.h>
44#include <linux/highmem.h>
45
46#include <asm/pgalloc.h>
47#include <asm/prom.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/bootx.h>
55#include <asm/machdep.h>
56#include <asm/setup.h>
57
58extern void loadcam_entry(unsigned int index);
59unsigned int tlbcam_index;
60unsigned int num_tlbcam_entries;
61static unsigned long __cam0, __cam1, __cam2;
62extern unsigned long total_lowmem;
63extern unsigned long __max_low_memory;
64#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
65
66#define NUM_TLBCAMS (16)
67
68struct tlbcam {
69 u32 MAS0;
70 u32 MAS1;
71 u32 MAS2;
72 u32 MAS3;
73 u32 MAS7;
74} TLBCAM[NUM_TLBCAMS];
75
76struct tlbcamrange {
77 unsigned long start;
78 unsigned long limit;
79 phys_addr_t phys;
80} tlbcam_addrs[NUM_TLBCAMS];
81
82extern unsigned int tlbcam_index;
83
84/*
85 * Return PA for this VA if it is mapped by a CAM, or 0
86 */
87unsigned long v_mapped_by_tlbcam(unsigned long va)
88{
89 int b;
90 for (b = 0; b < tlbcam_index; ++b)
91 if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit)
92 return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start);
93 return 0;
94}
95
96/*
97 * Return VA for a given PA or 0 if not mapped
98 */
99unsigned long p_mapped_by_tlbcam(unsigned long pa)
100{
101 int b;
102 for (b = 0; b < tlbcam_index; ++b)
103 if (pa >= tlbcam_addrs[b].phys
104 && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start)
105 +tlbcam_addrs[b].phys)
106 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys);
107 return 0;
108}
109
110/*
111 * Set up one of the I/D BAT (block address translation) register pairs.
112 * The parameters are not checked; in particular size must be a power
113 * of 4 between 4k and 256M.
114 */
115void settlbcam(int index, unsigned long virt, phys_addr_t phys,
116 unsigned int size, int flags, unsigned int pid)
117{
118 unsigned int tsize, lz;
119
120 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
121 tsize = (21 - lz) / 2;
122
123#ifdef CONFIG_SMP
124 if ((flags & _PAGE_NO_CACHE) == 0)
125 flags |= _PAGE_COHERENT;
126#endif
127
128 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index) | MAS0_NV(index+1);
129 TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | MAS1_TID(pid);
130 TLBCAM[index].MAS2 = virt & PAGE_MASK;
131
132 TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0;
133 TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0;
134 TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0;
135 TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
136 TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
137
138 TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR;
139 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
140
141#ifndef CONFIG_KGDB /* want user access for breakpoints */
142 if (flags & _PAGE_USER) {
143 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
144 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
145 }
146#else
147 TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
148 TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
149#endif
150
151 tlbcam_addrs[index].start = virt;
152 tlbcam_addrs[index].limit = virt + size - 1;
153 tlbcam_addrs[index].phys = phys;
154
155 loadcam_entry(index);
156}
157
158void invalidate_tlbcam_entry(int index)
159{
160 TLBCAM[index].MAS0 = MAS0_TLBSEL(1) | MAS0_ESEL(index);
161 TLBCAM[index].MAS1 = ~MAS1_VALID;
162
163 loadcam_entry(index);
164}
165
166void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
167 unsigned long cam2)
168{
169 settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
170 tlbcam_index++;
171 if (cam1) {
172 tlbcam_index++;
173 settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
174 }
175 if (cam2) {
176 tlbcam_index++;
177 settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
178 }
179}
180
181/*
182 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
183 */
184void __init MMU_init_hw(void)
185{
186 flush_instruction_cache();
187}
188
189unsigned long __init mmu_mapin_ram(void)
190{
191 cam_mapin_ram(__cam0, __cam1, __cam2);
192
193 return __cam0 + __cam1 + __cam2;
194}
195
196
197void __init
198adjust_total_lowmem(void)
199{
200 unsigned long max_low_mem = MAX_LOW_MEM;
201 unsigned long cam_max = 0x10000000;
202 unsigned long ram;
203
204 /* adjust CAM size to max_low_mem */
205 if (max_low_mem < cam_max)
206 cam_max = max_low_mem;
207
208 /* adjust lowmem size to max_low_mem */
209 if (max_low_mem < total_lowmem)
210 ram = max_low_mem;
211 else
212 ram = total_lowmem;
213
214 /* Calculate CAM values */
215 __cam0 = 1UL << 2 * (__ilog2(ram) / 2);
216 if (__cam0 > cam_max)
217 __cam0 = cam_max;
218 ram -= __cam0;
219 if (ram) {
220 __cam1 = 1UL << 2 * (__ilog2(ram) / 2);
221 if (__cam1 > cam_max)
222 __cam1 = cam_max;
223 ram -= __cam1;
224 }
225 if (ram) {
226 __cam2 = 1UL << 2 * (__ilog2(ram) / 2);
227 if (__cam2 > cam_max)
228 __cam2 = cam_max;
229 ram -= __cam2;
230 }
231
232 printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb,"
233 " CAM2=%ldMb residual: %ldMb\n",
234 __cam0 >> 20, __cam1 >> 20, __cam2 >> 20,
235 (total_lowmem - __cam0 - __cam1 - __cam2) >> 20);
236 __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2;
237}
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
new file mode 100644
index 000000000000..12ccd7155bac
--- /dev/null
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -0,0 +1,618 @@
1/*
2 * arch/ppc/kernel/hashtable.S
3 *
4 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Adapted for Power Macintosh by Paul Mackerras.
11 * Low-level exception handlers and MMU support
12 * rewritten by Paul Mackerras.
13 * Copyright (C) 1996 Paul Mackerras.
14 *
15 * This file contains low-level assembler routines for managing
16 * the PowerPC MMU hash table. (PPC 8xx processors don't use a
17 * hash table, so this file is not used on them.)
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <asm/reg.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/cputable.h>
31#include <asm/ppc_asm.h>
32#include <asm/thread_info.h>
33#include <asm/asm-offsets.h>
34
#ifdef CONFIG_SMP
	.comm	mmu_hash_lock,4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff		/* non-zero "held" value for the lock */
	b	10f
11:	lwz	r6,0(r8)		/* spin (plain load) while lock held */
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8			/* try to take the lock */
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b			/* reservation lost: retry */
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)		/* drop it immediately: we only needed
					   to serialize with hash_page */
	blr
#endif
64
/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
	MTMSRD(r0)
	isync
#endif
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff		/* non-zero "held" value for the lock */
	b	10f
11:	lwz	r6,0(r8)		/* spin while the lock is held */
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8			/* take mmu_hash_lock */
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 */
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)		/* save ctr; create_hpte clobbers it */
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)	/* release mmu_hash_lock */
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:				/* error exit: drop the lock, return */
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */
174
/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)		/* save LR: we call create_hpte below */

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */

#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12		/* caller tag in upper half (flush_hash_pages
					   uses 9) -- NOTE(review): debug aid? confirm */
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * we can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)			/* -KERNELBASE offset for phys access */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)		/* spin with plain loads until free */
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
	rlwimi	r8,r4,22,20,29		/* r8 = phys address of the linux pte */
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)		/* restore LR saved above */
	mtlr	r0
	blr
265
/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE	16
#define PTEG_SIZE	128
#define LG_PTEG_SIZE	7
#define LDPTEu		ldu
#define STPTE		std
#define CMPPTE		cmpd
#define PTE_H		2
#define PTE_V		1
#define TST_V(r)	andi. r,r,PTE_V
#define SET_V(r)	ori r,r,PTE_V
#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)

	/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r5,r3,12		/* shift vsid into position */
	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4			/* flush this CPU's stale translation
					   before repointing the HPTE */

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE		/* round-robin over the 8 slots */
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr

	.comm	next_slot,4
	.comm	primary_pteg_full,4
	.comm	htab_hash_searches,4
474
/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * we can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29		/* r5 = address of linux pte */
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1		/* cr1 tracks remaining count (r6) */
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f			/* none left with HASHPTE set: done */
	addi	r4,r4,0x1000		/* next page */
	addi	r5,r5,4			/* next pte */
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r11,r3,12		/* shift vsid into position */
	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18		/* cpu tag for the lock value */
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9			/* caller tag (add_hash_page uses 12) */
10:	lwarx	r0,0,r9			/* take mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)		/* spin until free */
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4			/* advance to next pte */
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10			/* restore MSR (EE, DR) */
	SYNC_601
	isync
	blr
diff --git a/arch/ppc64/mm/hash_low.S b/arch/powerpc/mm/hash_low_64.S
index ee5a5d36bfa8..d6ed9102eeea 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -10,7 +10,7 @@
10 * described in the kernel's COPYING file. 10 * described in the kernel's COPYING file.
11 */ 11 */
12 12
13#include <asm/processor.h> 13#include <asm/reg.h>
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/page.h> 16#include <asm/page.h>
diff --git a/arch/ppc64/mm/hash_native.c b/arch/powerpc/mm/hash_native_64.c
index bfd385b7713c..174d14576c28 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -335,10 +335,9 @@ static void native_hpte_clear(void)
335 local_irq_restore(flags); 335 local_irq_restore(flags);
336} 336}
337 337
338static void native_flush_hash_range(unsigned long context, 338static void native_flush_hash_range(unsigned long number, int local)
339 unsigned long number, int local)
340{ 339{
341 unsigned long vsid, vpn, va, hash, secondary, slot, flags, avpn; 340 unsigned long va, vpn, hash, secondary, slot, flags, avpn;
342 int i, j; 341 int i, j;
343 hpte_t *hptep; 342 hpte_t *hptep;
344 unsigned long hpte_v; 343 unsigned long hpte_v;
@@ -349,13 +348,7 @@ static void native_flush_hash_range(unsigned long context,
349 348
350 j = 0; 349 j = 0;
351 for (i = 0; i < number; i++) { 350 for (i = 0; i < number; i++) {
352 if (batch->addr[i] < KERNELBASE) 351 va = batch->vaddr[j];
353 vsid = get_vsid(context, batch->addr[i]);
354 else
355 vsid = get_kernel_vsid(batch->addr[i]);
356
357 va = (vsid << 28) | (batch->addr[i] & 0x0fffffff);
358 batch->vaddr[j] = va;
359 if (large) 352 if (large)
360 vpn = va >> HPAGE_SHIFT; 353 vpn = va >> HPAGE_SHIFT;
361 else 354 else
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/powerpc/mm/hash_utils_64.c
index 09475c8edf7c..6e9e05cce02c 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -78,7 +78,7 @@ extern unsigned long dart_tablebase;
78hpte_t *htab_address; 78hpte_t *htab_address;
79unsigned long htab_hash_mask; 79unsigned long htab_hash_mask;
80 80
81extern unsigned long _SDR1; 81unsigned long _SDR1;
82 82
83#define KB (1024) 83#define KB (1024)
84#define MB (1024*KB) 84#define MB (1024*KB)
@@ -90,7 +90,6 @@ static inline void loop_forever(void)
90 ; 90 ;
91} 91}
92 92
93#ifdef CONFIG_PPC_MULTIPLATFORM
94static inline void create_pte_mapping(unsigned long start, unsigned long end, 93static inline void create_pte_mapping(unsigned long start, unsigned long end,
95 unsigned long mode, int large) 94 unsigned long mode, int large)
96{ 95{
@@ -111,7 +110,7 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
111 unsigned long vpn, hash, hpteg; 110 unsigned long vpn, hash, hpteg;
112 unsigned long vsid = get_kernel_vsid(addr); 111 unsigned long vsid = get_kernel_vsid(addr);
113 unsigned long va = (vsid << 28) | (addr & 0xfffffff); 112 unsigned long va = (vsid << 28) | (addr & 0xfffffff);
114 int ret; 113 int ret = -1;
115 114
116 if (large) 115 if (large)
117 vpn = va >> HPAGE_SHIFT; 116 vpn = va >> HPAGE_SHIFT;
@@ -129,16 +128,25 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
129 128
130 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 129 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
131 130
131#ifdef CONFIG_PPC_ISERIES
132 if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
133 ret = iSeries_hpte_bolt_or_insert(hpteg, va,
134 virt_to_abs(addr) >> PAGE_SHIFT,
135 vflags, tmp_mode);
136 else
137#endif
132#ifdef CONFIG_PPC_PSERIES 138#ifdef CONFIG_PPC_PSERIES
133 if (systemcfg->platform & PLATFORM_LPAR) 139 if (systemcfg->platform & PLATFORM_LPAR)
134 ret = pSeries_lpar_hpte_insert(hpteg, va, 140 ret = pSeries_lpar_hpte_insert(hpteg, va,
135 virt_to_abs(addr) >> PAGE_SHIFT, 141 virt_to_abs(addr) >> PAGE_SHIFT,
136 vflags, tmp_mode); 142 vflags, tmp_mode);
137 else 143 else
138#endif /* CONFIG_PPC_PSERIES */ 144#endif
145#ifdef CONFIG_PPC_MULTIPLATFORM
139 ret = native_hpte_insert(hpteg, va, 146 ret = native_hpte_insert(hpteg, va,
140 virt_to_abs(addr) >> PAGE_SHIFT, 147 virt_to_abs(addr) >> PAGE_SHIFT,
141 vflags, tmp_mode); 148 vflags, tmp_mode);
149#endif
142 150
143 if (ret == -1) { 151 if (ret == -1) {
144 ppc64_terminate_msg(0x20, "create_pte_mapping"); 152 ppc64_terminate_msg(0x20, "create_pte_mapping");
@@ -147,6 +155,27 @@ static inline void create_pte_mapping(unsigned long start, unsigned long end,
147 } 155 }
148} 156}
149 157
158static unsigned long get_hashtable_size(void)
159{
160 unsigned long rnd_mem_size, pteg_count;
161
162 /* If hash size wasn't obtained in prom.c, we calculate it now based on
163 * the total RAM size
164 */
165 if (ppc64_pft_size)
166 return 1UL << ppc64_pft_size;
167
168 /* round mem_size up to next power of 2 */
169 rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
170 if (rnd_mem_size < systemcfg->physicalMemorySize)
171 rnd_mem_size <<= 1;
172
173 /* # pages / 2 */
174 pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
175
176 return pteg_count << 7;
177}
178
150void __init htab_initialize(void) 179void __init htab_initialize(void)
151{ 180{
152 unsigned long table, htab_size_bytes; 181 unsigned long table, htab_size_bytes;
@@ -162,7 +191,7 @@ void __init htab_initialize(void)
162 * Calculate the required size of the htab. We want the number of 191 * Calculate the required size of the htab. We want the number of
163 * PTEGs to equal one half the number of real pages. 192 * PTEGs to equal one half the number of real pages.
164 */ 193 */
165 htab_size_bytes = 1UL << ppc64_pft_size; 194 htab_size_bytes = get_hashtable_size();
166 pteg_count = htab_size_bytes >> 7; 195 pteg_count = htab_size_bytes >> 7;
167 196
168 /* For debug, make the HTAB 1/8 as big as it normally would be. */ 197 /* For debug, make the HTAB 1/8 as big as it normally would be. */
@@ -261,7 +290,6 @@ void __init htab_initialize(void)
261} 290}
262#undef KB 291#undef KB
263#undef MB 292#undef MB
264#endif /* CONFIG_PPC_MULTIPLATFORM */
265 293
266/* 294/*
267 * Called by asm hashtable.S for doing lazy icache flush 295 * Called by asm hashtable.S for doing lazy icache flush
@@ -355,18 +383,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
355 return ret; 383 return ret;
356} 384}
357 385
358void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte, 386void flush_hash_page(unsigned long va, pte_t pte, int local)
359 int local)
360{ 387{
361 unsigned long vsid, vpn, va, hash, secondary, slot; 388 unsigned long vpn, hash, secondary, slot;
362 unsigned long huge = pte_huge(pte); 389 unsigned long huge = pte_huge(pte);
363 390
364 if (ea < KERNELBASE)
365 vsid = get_vsid(context, ea);
366 else
367 vsid = get_kernel_vsid(ea);
368
369 va = (vsid << 28) | (ea & 0x0fffffff);
370 if (huge) 391 if (huge)
371 vpn = va >> HPAGE_SHIFT; 392 vpn = va >> HPAGE_SHIFT;
372 else 393 else
@@ -381,17 +402,17 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
381 ppc_md.hpte_invalidate(slot, va, huge, local); 402 ppc_md.hpte_invalidate(slot, va, huge, local);
382} 403}
383 404
384void flush_hash_range(unsigned long context, unsigned long number, int local) 405void flush_hash_range(unsigned long number, int local)
385{ 406{
386 if (ppc_md.flush_hash_range) { 407 if (ppc_md.flush_hash_range) {
387 ppc_md.flush_hash_range(context, number, local); 408 ppc_md.flush_hash_range(number, local);
388 } else { 409 } else {
389 int i; 410 int i;
390 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 411 struct ppc64_tlb_batch *batch =
412 &__get_cpu_var(ppc64_tlb_batch);
391 413
392 for (i = 0; i < number; i++) 414 for (i = 0; i < number; i++)
393 flush_hash_page(context, batch->addr[i], batch->pte[i], 415 flush_hash_page(batch->vaddr[i], batch->pte[i], local);
394 local);
395 } 416 }
396} 417}
397 418
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ea0994ed974..0ea0994ed974 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
diff --git a/arch/ppc64/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f4ca29cf5364..f4ca29cf5364 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
new file mode 100644
index 000000000000..4612a79dfb6e
--- /dev/null
+++ b/arch/powerpc/mm/init_32.c
@@ -0,0 +1,254 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/prom.h>
47#include <asm/lmb.h>
48#include <asm/sections.h>
49
50#include "mmu_decl.h"
51
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Totals derived from the LMB map in MMU_init() below */
unsigned long total_memory;
unsigned long total_lowmem;

unsigned long ppc_memstart;
unsigned long ppc_memoffset = PAGE_OFFSET;

int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;
EXPORT_SYMBOL(agp_special_page);
#endif

#ifdef CONFIG_HIGHMEM
/* kmap state; initialized elsewhere (not in the visible part of this file) */
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
#endif

void MMU_init(void);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

char *klimit = _end;
struct device_node *memory_node;

extern int init_bootmem_done;

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;

/*
 * limit of what is accessible with initial MMU setup -
 * 256MB usually, but only 16MB on 601.
 */
unsigned long __initial_memory_limit = 0x10000000;
108
109/*
110 * Check for command-line options that affect what MMU_init will do.
111 */
112void MMU_setup(void)
113{
114 /* Check for nobats option (used in mapin_ram). */
115 if (strstr(cmd_line, "nobats")) {
116 __map_without_bats = 1;
117 }
118
119 if (strstr(cmd_line, "noltlbs")) {
120 __map_without_ltlbs = 1;
121 }
122}
123
124/*
125 * MMU_init sets up the basic memory mappings for the kernel,
126 * including both RAM and possibly some I/O regions,
127 * and sets up the page tables and the MMU hardware ready to go.
128 */
129void __init MMU_init(void)
130{
131 if (ppc_md.progress)
132 ppc_md.progress("MMU:enter", 0x111);
133
134 /* 601 can only access 16MB at the moment */
135 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
136 __initial_memory_limit = 0x01000000;
137
138 /* parse args from command line */
139 MMU_setup();
140
141 if (lmb.memory.cnt > 1) {
142 lmb.memory.cnt = 1;
143 lmb_analyze();
144 printk(KERN_WARNING "Only using first contiguous memory region");
145 }
146
147 total_memory = lmb_end_of_DRAM();
148 total_lowmem = total_memory;
149
150#ifdef CONFIG_FSL_BOOKE
151 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
152 * entries, so we need to adjust lowmem to match the amount we can map
153 * in the fixed entries */
154 adjust_total_lowmem();
155#endif /* CONFIG_FSL_BOOKE */
156
157 if (total_lowmem > __max_low_memory) {
158 total_lowmem = __max_low_memory;
159#ifndef CONFIG_HIGHMEM
160 total_memory = total_lowmem;
161 lmb_enforce_memory_limit(total_lowmem);
162 lmb_analyze();
163#endif /* CONFIG_HIGHMEM */
164 }
165
166 /* Initialize the MMU hardware */
167 if (ppc_md.progress)
168 ppc_md.progress("MMU:hw init", 0x300);
169 MMU_init_hw();
170
171 /* Map in all of RAM starting at KERNELBASE */
172 if (ppc_md.progress)
173 ppc_md.progress("MMU:mapin", 0x301);
174 mapin_ram();
175
176#ifdef CONFIG_HIGHMEM
177 ioremap_base = PKMAP_BASE;
178#else
179 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
180#endif /* CONFIG_HIGHMEM */
181 ioremap_bot = ioremap_base;
182
183 /* Map in I/O resources */
184 if (ppc_md.progress)
185 ppc_md.progress("MMU:setio", 0x302);
186 if (ppc_md.setup_io_mappings)
187 ppc_md.setup_io_mappings();
188
189 /* Initialize the context management stuff */
190 mmu_context_init();
191
192 if (ppc_md.progress)
193 ppc_md.progress("MMU:exit", 0x211);
194}
195
196/* This is only called until mem_init is done. */
197void __init *early_get_page(void)
198{
199 void *p;
200
201 if (init_bootmem_done) {
202 p = alloc_bootmem_pages(PAGE_SIZE);
203 } else {
204 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
205 __initial_memory_limit));
206 }
207 return p;
208}
209
210/* Free up now-unused memory */
211static void free_sec(unsigned long start, unsigned long end, const char *name)
212{
213 unsigned long cnt = 0;
214
215 while (start < end) {
216 ClearPageReserved(virt_to_page(start));
217 set_page_count(virt_to_page(start), 1);
218 free_page(start);
219 cnt++;
220 start += PAGE_SIZE;
221 }
222 if (cnt) {
223 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
224 totalram_pages += cnt;
225 }
226}
227
228void free_initmem(void)
229{
230#define FREESEC(TYPE) \
231 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
232 (unsigned long)(&__ ## TYPE ## _end), \
233 #TYPE);
234
235 printk ("Freeing unused kernel memory:");
236 FREESEC(init);
237 printk("\n");
238 ppc_md.progress = NULL;
239#undef FREESEC
240}
241
242#ifdef CONFIG_BLK_DEV_INITRD
243void free_initrd_mem(unsigned long start, unsigned long end)
244{
245 if (start < end)
246 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
247 for (; start < end; start += PAGE_SIZE) {
248 ClearPageReserved(virt_to_page(start));
249 set_page_count(virt_to_page(start), 1);
250 free_page(start);
251 totalram_pages++;
252 }
253}
254#endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
new file mode 100644
index 000000000000..b0fc822ec29f
--- /dev/null
+++ b/arch/powerpc/mm/init_64.c
@@ -0,0 +1,223 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76unsigned long klimit = (unsigned long)_end;
77
78/* max amount of RAM to use */
79unsigned long __max_memory;
80
81/* info on what we think the IO hole is */
82unsigned long io_hole_start;
83unsigned long io_hole_size;
84
85/*
86 * Do very early mm setup.
87 */
88void __init mm_init_ppc64(void)
89{
90#ifndef CONFIG_PPC_ISERIES
91 unsigned long i;
92#endif
93
94 ppc64_boot_msg(0x100, "MM Init");
95
96 /* This is the story of the IO hole... please, keep seated,
97 * unfortunately, we are out of oxygen masks at the moment.
98 * So we need some rough way to tell where your big IO hole
99 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
100 * that area as well, on POWER4 we don't have one, etc...
101 * We need that as a "hint" when sizing the TCE table on POWER3
102 * So far, the simplest way that seem work well enough for us it
103 * to just assume that the first discontinuity in our physical
104 * RAM layout is the IO hole. That may not be correct in the future
105 * (and isn't on iSeries but then we don't care ;)
106 */
107
108#ifndef CONFIG_PPC_ISERIES
109 for (i = 1; i < lmb.memory.cnt; i++) {
110 unsigned long base, prevbase, prevsize;
111
112 prevbase = lmb.memory.region[i-1].base;
113 prevsize = lmb.memory.region[i-1].size;
114 base = lmb.memory.region[i].base;
115 if (base > (prevbase + prevsize)) {
116 io_hole_start = prevbase + prevsize;
117 io_hole_size = base - (prevbase + prevsize);
118 break;
119 }
120 }
121#endif /* CONFIG_PPC_ISERIES */
122 if (io_hole_start)
123 printk("IO Hole assumed to be %lx -> %lx\n",
124 io_hole_start, io_hole_start + io_hole_size - 1);
125
126 ppc64_boot_msg(0x100, "MM Init Done");
127}
128
129void free_initmem(void)
130{
131 unsigned long addr;
132
133 addr = (unsigned long)__init_begin;
134 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
135 memset((void *)addr, 0xcc, PAGE_SIZE);
136 ClearPageReserved(virt_to_page(addr));
137 set_page_count(virt_to_page(addr), 1);
138 free_page(addr);
139 totalram_pages++;
140 }
141 printk ("Freeing unused kernel memory: %luk freed\n",
142 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
143}
144
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Give the pages that held the initrd image back to the page
 * allocator once the initrd has been consumed.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *pg = virt_to_page(addr);

		ClearPageReserved(pg);
		set_page_count(pg, 1);
		free_page(addr);
		totalram_pages++;
	}
}
#endif
158
159static struct kcore_list kcore_vmem;
160
161static int __init setup_kcore(void)
162{
163 int i;
164
165 for (i=0; i < lmb.memory.cnt; i++) {
166 unsigned long base, size;
167 struct kcore_list *kcore_mem;
168
169 base = lmb.memory.region[i].base;
170 size = lmb.memory.region[i].size;
171
172 /* GFP_ATOMIC to avoid might_sleep warnings during boot */
173 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
174 if (!kcore_mem)
175 panic("mem_init: kmalloc failed\n");
176
177 kclist_add(kcore_mem, __va(base), size);
178 }
179
180 kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
181
182 return 0;
183}
184module_init(setup_kcore);
185
/* Slab constructor: page-table pages must start out fully zeroed. */
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
190
/* Object sizes of the two page-table slab caches, indexed by the
 * *_CACHE_NUM constants (PTE/PGD share one cache, PMD/PUD the other,
 * as the BUILD_BUG_ONs below verify). */
static const int pgtable_cache_size[2] = {
	PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
/* Human-readable cache names, parallel to pgtable_cache_size[]. */
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
	"pgd_pte_cache", "pud_pmd_cache",
};

kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];

/*
 * Create the slab caches used for page-table pages.  Objects are
 * hardware-cacheline aligned and zeroed at construction time by
 * zero_ctor().  Panics on failure: the kernel cannot run without
 * page-table allocation.
 */
void pgtable_cache_init(void)
{
	int i;

	/* Every table level's size must map onto one of the two caches. */
	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);

	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
		int size = pgtable_cache_size[i];
		const char *name = pgtable_cache_name[i];

		pgtable_cache[i] = kmem_cache_create(name,
						     size, size,
						     SLAB_HWCACHE_ALIGN
						     | SLAB_MUST_HWCACHE_ALIGN,
						     zero_ctor,
						     NULL);
		if (! pgtable_cache[i])
			panic("pgtable_cache_init(): could not create %s!\n",
			      name);
	}
}
diff --git a/arch/ppc64/kernel/lmb.c b/arch/powerpc/mm/lmb.c
index 5adaca2ddc9d..9b5aa6808eb8 100644
--- a/arch/ppc64/kernel/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Procedures for interfacing to Open Firmware. 2 * Procedures for maintaining information about logical memory blocks.
3 * 3 *
4 * Peter Bergner, IBM Corp. June 2001. 4 * Peter Bergner, IBM Corp. June 2001.
5 * Copyright (C) 2001 Peter Bergner. 5 * Copyright (C) 2001 Peter Bergner.
@@ -18,7 +18,9 @@
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/lmb.h> 20#include <asm/lmb.h>
21#include <asm/abs_addr.h> 21#ifdef CONFIG_PPC32
22#include "mmu_decl.h" /* for __max_low_memory */
23#endif
22 24
23struct lmb lmb; 25struct lmb lmb;
24 26
@@ -54,16 +56,14 @@ void lmb_dump_all(void)
54#endif /* DEBUG */ 56#endif /* DEBUG */
55} 57}
56 58
57static unsigned long __init 59static unsigned long __init lmb_addrs_overlap(unsigned long base1,
58lmb_addrs_overlap(unsigned long base1, unsigned long size1, 60 unsigned long size1, unsigned long base2, unsigned long size2)
59 unsigned long base2, unsigned long size2)
60{ 61{
61 return ((base1 < (base2+size2)) && (base2 < (base1+size1))); 62 return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
62} 63}
63 64
64static long __init 65static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
65lmb_addrs_adjacent(unsigned long base1, unsigned long size1, 66 unsigned long base2, unsigned long size2)
66 unsigned long base2, unsigned long size2)
67{ 67{
68 if (base2 == base1 + size1) 68 if (base2 == base1 + size1)
69 return 1; 69 return 1;
@@ -73,8 +73,8 @@ lmb_addrs_adjacent(unsigned long base1, unsigned long size1,
73 return 0; 73 return 0;
74} 74}
75 75
76static long __init 76static long __init lmb_regions_adjacent(struct lmb_region *rgn,
77lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 77 unsigned long r1, unsigned long r2)
78{ 78{
79 unsigned long base1 = rgn->region[r1].base; 79 unsigned long base1 = rgn->region[r1].base;
80 unsigned long size1 = rgn->region[r1].size; 80 unsigned long size1 = rgn->region[r1].size;
@@ -85,8 +85,8 @@ lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
85} 85}
86 86
87/* Assumption: base addr of region 1 < base addr of region 2 */ 87/* Assumption: base addr of region 1 < base addr of region 2 */
88static void __init 88static void __init lmb_coalesce_regions(struct lmb_region *rgn,
89lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2) 89 unsigned long r1, unsigned long r2)
90{ 90{
91 unsigned long i; 91 unsigned long i;
92 92
@@ -99,8 +99,7 @@ lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, unsigned long r2)
99} 99}
100 100
101/* This routine called with relocation disabled. */ 101/* This routine called with relocation disabled. */
102void __init 102void __init lmb_init(void)
103lmb_init(void)
104{ 103{
105 /* Create a dummy zero size LMB which will get coalesced away later. 104 /* Create a dummy zero size LMB which will get coalesced away later.
106 * This simplifies the lmb_add() code below... 105 * This simplifies the lmb_add() code below...
@@ -115,9 +114,8 @@ lmb_init(void)
115 lmb.reserved.cnt = 1; 114 lmb.reserved.cnt = 1;
116} 115}
117 116
118/* This routine called with relocation disabled. */ 117/* This routine may be called with relocation disabled. */
119void __init 118void __init lmb_analyze(void)
120lmb_analyze(void)
121{ 119{
122 int i; 120 int i;
123 121
@@ -128,8 +126,8 @@ lmb_analyze(void)
128} 126}
129 127
130/* This routine called with relocation disabled. */ 128/* This routine called with relocation disabled. */
131static long __init 129static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
132lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 130 unsigned long size)
133{ 131{
134 unsigned long i, coalesced = 0; 132 unsigned long i, coalesced = 0;
135 long adjacent; 133 long adjacent;
@@ -158,18 +156,17 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
158 coalesced++; 156 coalesced++;
159 } 157 }
160 158
161 if ( coalesced ) { 159 if (coalesced)
162 return coalesced; 160 return coalesced;
163 } else if ( rgn->cnt >= MAX_LMB_REGIONS ) { 161 if (rgn->cnt >= MAX_LMB_REGIONS)
164 return -1; 162 return -1;
165 }
166 163
167 /* Couldn't coalesce the LMB, so add it to the sorted table. */ 164 /* Couldn't coalesce the LMB, so add it to the sorted table. */
168 for (i=rgn->cnt-1; i >= 0; i--) { 165 for (i = rgn->cnt-1; i >= 0; i--) {
169 if (base < rgn->region[i].base) { 166 if (base < rgn->region[i].base) {
170 rgn->region[i+1].base = rgn->region[i].base; 167 rgn->region[i+1].base = rgn->region[i].base;
171 rgn->region[i+1].size = rgn->region[i].size; 168 rgn->region[i+1].size = rgn->region[i].size;
172 } else { 169 } else {
173 rgn->region[i+1].base = base; 170 rgn->region[i+1].base = base;
174 rgn->region[i+1].size = size; 171 rgn->region[i+1].size = size;
175 break; 172 break;
@@ -180,30 +177,28 @@ lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long size)
180 return 0; 177 return 0;
181} 178}
182 179
183/* This routine called with relocation disabled. */ 180/* This routine may be called with relocation disabled. */
184long __init 181long __init lmb_add(unsigned long base, unsigned long size)
185lmb_add(unsigned long base, unsigned long size)
186{ 182{
187 struct lmb_region *_rgn = &(lmb.memory); 183 struct lmb_region *_rgn = &(lmb.memory);
188 184
189 /* On pSeries LPAR systems, the first LMB is our RMO region. */ 185 /* On pSeries LPAR systems, the first LMB is our RMO region. */
190 if ( base == 0 ) 186 if (base == 0)
191 lmb.rmo_size = size; 187 lmb.rmo_size = size;
192 188
193 return lmb_add_region(_rgn, base, size); 189 return lmb_add_region(_rgn, base, size);
194 190
195} 191}
196 192
197long __init 193long __init lmb_reserve(unsigned long base, unsigned long size)
198lmb_reserve(unsigned long base, unsigned long size)
199{ 194{
200 struct lmb_region *_rgn = &(lmb.reserved); 195 struct lmb_region *_rgn = &(lmb.reserved);
201 196
202 return lmb_add_region(_rgn, base, size); 197 return lmb_add_region(_rgn, base, size);
203} 198}
204 199
205long __init 200long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base,
206lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long size) 201 unsigned long size)
207{ 202{
208 unsigned long i; 203 unsigned long i;
209 204
@@ -218,39 +213,44 @@ lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, unsigned long si
218 return (i < rgn->cnt) ? i : -1; 213 return (i < rgn->cnt) ? i : -1;
219} 214}
220 215
221unsigned long __init 216unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
222lmb_alloc(unsigned long size, unsigned long align)
223{ 217{
224 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); 218 return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
225} 219}
226 220
227unsigned long __init 221unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
228lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr) 222 unsigned long max_addr)
229{ 223{
230 long i, j; 224 long i, j;
231 unsigned long base = 0; 225 unsigned long base = 0;
232 226
233 for (i=lmb.memory.cnt-1; i >= 0; i--) { 227#ifdef CONFIG_PPC32
228 /* On 32-bit, make sure we allocate lowmem */
229 if (max_addr == LMB_ALLOC_ANYWHERE)
230 max_addr = __max_low_memory;
231#endif
232 for (i = lmb.memory.cnt-1; i >= 0; i--) {
234 unsigned long lmbbase = lmb.memory.region[i].base; 233 unsigned long lmbbase = lmb.memory.region[i].base;
235 unsigned long lmbsize = lmb.memory.region[i].size; 234 unsigned long lmbsize = lmb.memory.region[i].size;
236 235
237 if ( max_addr == LMB_ALLOC_ANYWHERE ) 236 if (max_addr == LMB_ALLOC_ANYWHERE)
238 base = _ALIGN_DOWN(lmbbase+lmbsize-size, align); 237 base = _ALIGN_DOWN(lmbbase + lmbsize - size, align);
239 else if ( lmbbase < max_addr ) 238 else if (lmbbase < max_addr) {
240 base = _ALIGN_DOWN(min(lmbbase+lmbsize,max_addr)-size, align); 239 base = min(lmbbase + lmbsize, max_addr);
241 else 240 base = _ALIGN_DOWN(base - size, align);
241 } else
242 continue; 242 continue;
243 243
244 while ( (lmbbase <= base) && 244 while ((lmbbase <= base) &&
245 ((j = lmb_overlaps_region(&lmb.reserved,base,size)) >= 0) ) { 245 ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) )
246 base = _ALIGN_DOWN(lmb.reserved.region[j].base-size, align); 246 base = _ALIGN_DOWN(lmb.reserved.region[j].base - size,
247 } 247 align);
248 248
249 if ( (base != 0) && (lmbbase <= base) ) 249 if ((base != 0) && (lmbbase <= base))
250 break; 250 break;
251 } 251 }
252 252
253 if ( i < 0 ) 253 if (i < 0)
254 return 0; 254 return 0;
255 255
256 lmb_add_region(&lmb.reserved, base, size); 256 lmb_add_region(&lmb.reserved, base, size);
@@ -259,14 +259,12 @@ lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr)
259} 259}
260 260
261/* You must call lmb_analyze() before this. */ 261/* You must call lmb_analyze() before this. */
262unsigned long __init 262unsigned long __init lmb_phys_mem_size(void)
263lmb_phys_mem_size(void)
264{ 263{
265 return lmb.memory.size; 264 return lmb.memory.size;
266} 265}
267 266
268unsigned long __init 267unsigned long __init lmb_end_of_DRAM(void)
269lmb_end_of_DRAM(void)
270{ 268{
271 int idx = lmb.memory.cnt - 1; 269 int idx = lmb.memory.cnt - 1;
272 270
@@ -277,9 +275,8 @@ lmb_end_of_DRAM(void)
277 * Truncate the lmb list to memory_limit if it's set 275 * Truncate the lmb list to memory_limit if it's set
278 * You must call lmb_analyze() after this. 276 * You must call lmb_analyze() after this.
279 */ 277 */
280void __init lmb_enforce_memory_limit(void) 278void __init lmb_enforce_memory_limit(unsigned long memory_limit)
281{ 279{
282 extern unsigned long memory_limit;
283 unsigned long i, limit; 280 unsigned long i, limit;
284 281
285 if (! memory_limit) 282 if (! memory_limit)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
new file mode 100644
index 000000000000..117b00012e14
--- /dev/null
+++ b/arch/powerpc/mm/mem.c
@@ -0,0 +1,564 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
10 *
11 * Derived from "arch/i386/mm/init.c"
12 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/errno.h>
26#include <linux/string.h>
27#include <linux/types.h>
28#include <linux/mm.h>
29#include <linux/stddef.h>
30#include <linux/init.h>
31#include <linux/bootmem.h>
32#include <linux/highmem.h>
33#include <linux/initrd.h>
34#include <linux/pagemap.h>
35
36#include <asm/pgalloc.h>
37#include <asm/prom.h>
38#include <asm/io.h>
39#include <asm/mmu_context.h>
40#include <asm/pgtable.h>
41#include <asm/mmu.h>
42#include <asm/smp.h>
43#include <asm/machdep.h>
44#include <asm/btext.h>
45#include <asm/tlb.h>
46#include <asm/prom.h>
47#include <asm/lmb.h>
48#include <asm/sections.h>
49#ifdef CONFIG_PPC64
50#include <asm/vdso.h>
51#endif
52
53#include "mmu_decl.h"
54
55#ifndef CPU_FTR_COHERENT_ICACHE
56#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
57#define CPU_FTR_NOEXECUTE 0
58#endif
59
60int init_bootmem_done;
61int mem_init_done;
62unsigned long memory_limit;
63
64/*
65 * This is called by /dev/mem to know if a given address has to
66 * be mapped non-cacheable or not
67 */
68int page_is_ram(unsigned long pfn)
69{
70 unsigned long paddr = (pfn << PAGE_SHIFT);
71
72#ifndef CONFIG_PPC64 /* XXX for now */
73 return paddr < __pa(high_memory);
74#else
75 int i;
76 for (i=0; i < lmb.memory.cnt; i++) {
77 unsigned long base;
78
79 base = lmb.memory.region[i].base;
80
81 if ((paddr >= base) &&
82 (paddr < (base + lmb.memory.region[i].size))) {
83 return 1;
84 }
85 }
86
87 return 0;
88#endif
89}
90EXPORT_SYMBOL(page_is_ram);
91
92pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
93 unsigned long size, pgprot_t vma_prot)
94{
95 if (ppc_md.phys_mem_access_prot)
96 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
97
98 if (!page_is_ram(pfn))
99 vma_prot = __pgprot(pgprot_val(vma_prot)
100 | _PAGE_GUARDED | _PAGE_NO_CACHE);
101 return vma_prot;
102}
103EXPORT_SYMBOL(phys_mem_access_prot);
104
#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Make one hot-added page available to the page allocator and account
 * for it in the global page counts.
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	free_cold_page(page);
	totalram_pages++;
	num_physpages++;
}
114
115/*
116 * This works only for the non-NUMA case. Later, we'll need a lookup
117 * to convert from real physical addresses to nid, that doesn't use
118 * pfn_to_nid().
119 */
120int __devinit add_memory(u64 start, u64 size)
121{
122 struct pglist_data *pgdata = NODE_DATA(0);
123 struct zone *zone;
124 unsigned long start_pfn = start >> PAGE_SHIFT;
125 unsigned long nr_pages = size >> PAGE_SHIFT;
126
127 /* this should work for most non-highmem platforms */
128 zone = pgdata->node_zones;
129
130 return __add_pages(zone, start_pfn, nr_pages);
131
132 return 0;
133}
134
135/*
136 * First pass at this code will check to determine if the remove
137 * request is within the RMO. Do not allow removal within the RMO.
138 */
139int __devinit remove_memory(u64 start, u64 size)
140{
141 struct zone *zone;
142 unsigned long start_pfn, end_pfn, nr_pages;
143
144 start_pfn = start >> PAGE_SHIFT;
145 nr_pages = size >> PAGE_SHIFT;
146 end_pfn = start_pfn + nr_pages;
147
148 printk("%s(): Attempting to remove memoy in range "
149 "%lx to %lx\n", __func__, start, start+size);
150 /*
151 * check for range within RMO
152 */
153 zone = page_zone(pfn_to_page(start_pfn));
154
155 printk("%s(): memory will be removed from "
156 "the %s zone\n", __func__, zone->name);
157
158 /*
159 * not handling removing memory ranges that
160 * overlap multiple zones yet
161 */
162 if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
163 goto overlap;
164
165 /* make sure it is NOT in RMO */
166 if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
167 printk("%s(): range to be removed must NOT be in RMO!\n",
168 __func__);
169 goto in_rmo;
170 }
171
172 return __remove_pages(zone, start_pfn, nr_pages);
173
174overlap:
175 printk("%s(): memory range to be removed overlaps "
176 "multiple zones!!!\n", __func__);
177in_rmo:
178 return -1;
179}
180#endif /* CONFIG_MEMORY_HOTPLUG */
181
182void show_mem(void)
183{
184 unsigned long total = 0, reserved = 0;
185 unsigned long shared = 0, cached = 0;
186 unsigned long highmem = 0;
187 struct page *page;
188 pg_data_t *pgdat;
189 unsigned long i;
190
191 printk("Mem-info:\n");
192 show_free_areas();
193 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
194 for_each_pgdat(pgdat) {
195 unsigned long flags;
196 pgdat_resize_lock(pgdat, &flags);
197 for (i = 0; i < pgdat->node_spanned_pages; i++) {
198 page = pgdat_page_nr(pgdat, i);
199 total++;
200 if (PageHighMem(page))
201 highmem++;
202 if (PageReserved(page))
203 reserved++;
204 else if (PageSwapCache(page))
205 cached++;
206 else if (page_count(page))
207 shared += page_count(page) - 1;
208 }
209 pgdat_resize_unlock(pgdat, &flags);
210 }
211 printk("%ld pages of RAM\n", total);
212#ifdef CONFIG_HIGHMEM
213 printk("%ld pages of HIGHMEM\n", highmem);
214#endif
215 printk("%ld reserved pages\n", reserved);
216 printk("%ld pages shared\n", shared);
217 printk("%ld pages swap cached\n", cached);
218}
219
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	/* Size bootmem by the top of DRAM (lowmem only with highmem). */
	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	/* Carve the bitmap itself out of LMB-managed memory. */
	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		/* Only lowmem goes to bootmem; clip regions that cross
		 * the lowmem boundary and skip pure-highmem ones. */
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));
	/* early_get_page() switches from LMB to bootmem after this. */
	init_bootmem_done = 1;
}
276
277/*
278 * paging_init() sets up the page tables - in fact we've already done this.
279 */
280void __init paging_init(void)
281{
282 unsigned long zones_size[MAX_NR_ZONES];
283 unsigned long zholes_size[MAX_NR_ZONES];
284 unsigned long total_ram = lmb_phys_mem_size();
285 unsigned long top_of_ram = lmb_end_of_DRAM();
286
287#ifdef CONFIG_HIGHMEM
288 map_page(PKMAP_BASE, 0, 0); /* XXX gross */
289 pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
290 (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
291 map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
292 kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
293 (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
294 kmap_prot = PAGE_KERNEL;
295#endif /* CONFIG_HIGHMEM */
296
297 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
298 top_of_ram, total_ram);
299 printk(KERN_INFO "Memory hole size: %ldMB\n",
300 (top_of_ram - total_ram) >> 20);
301 /*
302 * All pages are DMA-able so we put them all in the DMA zone.
303 */
304 memset(zones_size, 0, sizeof(zones_size));
305 memset(zholes_size, 0, sizeof(zholes_size));
306
307 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
308 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
309
310#ifdef CONFIG_HIGHMEM
311 zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
312 zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
313 zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
314#else
315 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
316 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
317#endif /* CONFIG_HIGHMEM */
318
319 free_area_init_node(0, NODE_DATA(0), zones_size,
320 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
321}
322#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
323
/*
 * Release all bootmem to the page allocator, count reserved pages,
 * free highmem pages (32-bit), print the memory summary, and mark
 * mem_init as done so later allocations use the normal allocators.
 */
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* NUMA: hand each node's bootmem to the buddy allocator. */
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif
	/* Count pages that stayed reserved after bootmem release. */
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	/* Section sizes for the summary line, from the linker symbols. */
	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		/* Highmem pages were not in bootmem; free them here. */
		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC64
	/* Initialize the vDSO */
	vdso_init();
#endif
}
398
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	/* Nothing to track when the CPU keeps the i-cache coherent. */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
413
/*
 * Make the i-cache consistent with the d-cache for one page, choosing
 * the mechanism by platform: kmap the page (BookE), use its lowmem
 * address (8xx/PPC64), or flush by physical address otherwise.
 */
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported.
	 * NOTE(review): the comment predates PPC64 being added to this
	 * branch; page_address() is used there for the same reason
	 * (no highmem) - confirm. */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif

}
/*
 * Zero a page destined for userspace and record that its i-cache
 * state is no longer known (unless the CPU's i-cache is coherent).
 */
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);
445
/*
 * Copy a page destined for userspace and mark the destination's
 * i-cache state unknown (unless the CPU's i-cache is coherent).
 */
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
	/* NOTE(review): the disabled code below references `vma`, which
	 * is not a parameter of this function - it would need the
	 * signature extended before it could ever be enabled. */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
473
474void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
475 unsigned long addr, int len)
476{
477 unsigned long maddr;
478
479 maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
480 flush_icache_range(maddr, maddr + len);
481 kunmap(page);
482}
483EXPORT_SYMBOL(flush_icache_user_range);
484
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	/* pfn of the page the new PTE maps (used for i-cache handling) */
	unsigned long pfn = pte_pfn(pte);
#ifdef CONFIG_PPC32
	pmd_t *pmd;
#else
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;
#endif

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
			/* On 8xx, cache control instructions (particularly
			 * "dcbst" from flush_dcache_icache) fault as write
			 * operation if there is an unpopulated TLB entry
			 * for the address in question. To workaround that,
			 * we invalidate the TLB here, thus avoiding dcbst
			 * misbehaviour.
			 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			/* Page is now known i-cache clean. */
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;
#ifdef CONFIG_PPC32
	/* 32-bit hash MMU: preload via add_hash_page() (no-op if no hash) */
	if (Hash == 0)
		return;
	pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
	if (!pmd_none(*pmd))
		add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
#else
	/* 64-bit: locate the PTE and hash it in directly. */
	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, address);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, address);

	local_irq_save(flags);
	/* "local" hashing is allowed only if this mm runs solely here. */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(address, 0, vsid, ptep, 0x300, local);
	local_irq_restore(flags);
#endif
#endif
}
diff --git a/arch/ppc64/mm/mmap.c b/arch/powerpc/mm/mmap.c
index fe65f522aff3..fe65f522aff3 100644
--- a/arch/ppc64/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
new file mode 100644
index 000000000000..a8816e0f6a86
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -0,0 +1,86 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/mm.h>
28#include <linux/init.h>
29
30#include <asm/mmu_context.h>
31#include <asm/tlbflush.h>
32
33mm_context_t next_mmu_context;
34unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
35#ifdef FEW_CONTEXTS
36atomic_t nr_free_contexts;
37struct mm_struct *context_mm[LAST_CONTEXT+1];
38void steal_context(void);
39#endif /* FEW_CONTEXTS */
40
41/*
42 * Initialize the context management stuff.
43 */
44void __init
45mmu_context_init(void)
46{
47 /*
48 * Some processors have too few contexts to reserve one for
49 * init_mm, and require using context 0 for a normal task.
50 * Other processors reserve the use of context zero for the kernel.
51 * This code assumes FIRST_CONTEXT < 32.
52 */
53 context_map[0] = (1 << FIRST_CONTEXT) - 1;
54 next_mmu_context = FIRST_CONTEXT;
55#ifdef FEW_CONTEXTS
56 atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
57#endif /* FEW_CONTEXTS */
58}
59
60#ifdef FEW_CONTEXTS
61/*
62 * Steal a context from a task that has one at the moment.
63 * This is only used on 8xx and 4xx and we presently assume that
64 * they don't do SMP. If they do then this will have to check
65 * whether the MM we steal is in use.
66 * We also assume that this is only used on systems that don't
67 * use an MMU hash table - this is true for 8xx and 4xx.
68 * This isn't an LRU system, it just frees up each context in
69 * turn (sort-of pseudo-random replacement :). This would be the
70 * place to implement an LRU scheme if anyone was motivated to do it.
71 * -- paulus
72 */
73void
74steal_context(void)
75{
76 struct mm_struct *mm;
77
78 /* free up context `next_mmu_context' */
79 /* if we shouldn't free context 0, don't... */
80 if (next_mmu_context < FIRST_CONTEXT)
81 next_mmu_context = FIRST_CONTEXT;
82 mm = context_mm[next_mmu_context];
83 flush_tlb_mm(mm);
84 destroy_context(mm);
85}
86#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
new file mode 100644
index 000000000000..714a84dd8d5d
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -0,0 +1,63 @@
1/*
2 * MMU context allocation for 64-bit kernels.
3 *
4 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/types.h>
19#include <linux/mm.h>
20#include <linux/spinlock.h>
21#include <linux/idr.h>
22
23#include <asm/mmu_context.h>
24
25static DEFINE_SPINLOCK(mmu_context_lock);
26static DEFINE_IDR(mmu_context_idr);
27
28int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
29{
30 int index;
31 int err;
32
33again:
34 if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
35 return -ENOMEM;
36
37 spin_lock(&mmu_context_lock);
38 err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
39 spin_unlock(&mmu_context_lock);
40
41 if (err == -EAGAIN)
42 goto again;
43 else if (err)
44 return err;
45
46 if (index > MAX_CONTEXT) {
47 idr_remove(&mmu_context_idr, index);
48 return -ENOMEM;
49 }
50
51 mm->context.id = index;
52
53 return 0;
54}
55
56void destroy_context(struct mm_struct *mm)
57{
58 spin_lock(&mmu_context_lock);
59 idr_remove(&mmu_context_idr, mm->context.id);
60 spin_unlock(&mmu_context_lock);
61
62 mm->context.id = NO_CONTEXT;
63}
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
new file mode 100644
index 000000000000..a4d7a327c0e5
--- /dev/null
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -0,0 +1,87 @@
1/*
2 * Declarations of procedures and variables shared between files
3 * in arch/ppc/mm/.
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22#include <asm/tlbflush.h>
23#include <asm/mmu.h>
24
25#ifdef CONFIG_PPC32
26extern void mapin_ram(void);
27extern int map_page(unsigned long va, phys_addr_t pa, int flags);
28extern void setbat(int index, unsigned long virt, unsigned long phys,
29 unsigned int size, int flags);
30extern void settlbcam(int index, unsigned long virt, phys_addr_t phys,
31 unsigned int size, int flags, unsigned int pid);
32extern void invalidate_tlbcam_entry(int index);
33
34extern int __map_without_bats;
35extern unsigned long ioremap_base;
36extern unsigned long ioremap_bot;
37extern unsigned int rtas_data, rtas_size;
38
39extern PTE *Hash, *Hash_end;
40extern unsigned long Hash_size, Hash_mask;
41
42extern unsigned int num_tlbcam_entries;
43#endif
44
45extern unsigned long __max_low_memory;
46extern unsigned long __initial_memory_limit;
47extern unsigned long total_memory;
48extern unsigned long total_lowmem;
49
50/* ...and now those things that may be slightly different between processor
51 * architectures. -- Dan
52 */
53#if defined(CONFIG_8xx)
54#define flush_HPTE(X, va, pg) _tlbie(va)
55#define MMU_init_hw() do { } while(0)
56#define mmu_mapin_ram() (0UL)
57
58#elif defined(CONFIG_4xx)
59#define flush_HPTE(X, va, pg) _tlbie(va)
60extern void MMU_init_hw(void);
61extern unsigned long mmu_mapin_ram(void);
62
63#elif defined(CONFIG_FSL_BOOKE)
64#define flush_HPTE(X, va, pg) _tlbie(va)
65extern void MMU_init_hw(void);
66extern unsigned long mmu_mapin_ram(void);
67extern void adjust_total_lowmem(void);
68
69#elif defined(CONFIG_PPC32)
70/* anything 32-bit except 4xx or 8xx */
71extern void MMU_init_hw(void);
72extern unsigned long mmu_mapin_ram(void);
73
74/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
75 * which includes all new 82xx processors. We need tlbie/tlbsync here
76 * in that case (I think). -- Dan.
77 */
78static inline void flush_HPTE(unsigned context, unsigned long va,
79 unsigned long pdval)
80{
81 if ((Hash != 0) &&
82 cpu_has_feature(CPU_FTR_HPTE_TABLE))
83 flush_hash_pages(0, va, pdval, 1);
84 else
85 _tlbie(va);
86}
87#endif
diff --git a/arch/ppc64/mm/numa.c b/arch/powerpc/mm/numa.c
index cb864b8f2750..4035cad8d7f1 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,6 +20,7 @@
20#include <asm/lmb.h> 20#include <asm/lmb.h>
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/abs_addr.h> 22#include <asm/abs_addr.h>
23#include <asm/system.h>
23 24
24static int numa_enabled = 1; 25static int numa_enabled = 1;
25 26
@@ -300,7 +301,6 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsig
300 * we've already adjusted it for the limit and it takes care of 301 * we've already adjusted it for the limit and it takes care of
301 * having memory holes below the limit. 302 * having memory holes below the limit.
302 */ 303 */
303 extern unsigned long memory_limit;
304 304
305 if (! memory_limit) 305 if (! memory_limit)
306 return size; 306 return size;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
new file mode 100644
index 000000000000..f4e5ac122615
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -0,0 +1,467 @@
1/*
2 * This file contains the routines setting up the linux page tables.
3 * -- paulus
4 *
5 * Derived from arch/ppc/mm/init.c:
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *
8 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
9 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
10 * Copyright (C) 1996 Paul Mackerras
11 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
12 *
13 * Derived from "arch/i386/mm/init.c"
14 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/types.h>
27#include <linux/mm.h>
28#include <linux/vmalloc.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/pgtable.h>
33#include <asm/pgalloc.h>
34#include <asm/io.h>
35
36#include "mmu_decl.h"
37
38unsigned long ioremap_base;
39unsigned long ioremap_bot;
40int io_bat_index;
41
42#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
43#define HAVE_BATS 1
44#endif
45
46#if defined(CONFIG_FSL_BOOKE)
47#define HAVE_TLBCAM 1
48#endif
49
50extern char etext[], _stext[];
51
52#ifdef CONFIG_SMP
53extern void hash_page_sync(void);
54#endif
55
56#ifdef HAVE_BATS
57extern unsigned long v_mapped_by_bats(unsigned long va);
58extern unsigned long p_mapped_by_bats(unsigned long pa);
59void setbat(int index, unsigned long virt, unsigned long phys,
60 unsigned int size, int flags);
61
62#else /* !HAVE_BATS */
63#define v_mapped_by_bats(x) (0UL)
64#define p_mapped_by_bats(x) (0UL)
65#endif /* HAVE_BATS */
66
67#ifdef HAVE_TLBCAM
68extern unsigned int tlbcam_index;
69extern unsigned long v_mapped_by_tlbcam(unsigned long va);
70extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
71#else /* !HAVE_TLBCAM */
72#define v_mapped_by_tlbcam(x) (0UL)
73#define p_mapped_by_tlbcam(x) (0UL)
74#endif /* HAVE_TLBCAM */
75
76#ifdef CONFIG_PTE_64BIT
77/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
78#define PGDIR_ORDER 1
79#else
80#define PGDIR_ORDER 0
81#endif
82
83pgd_t *pgd_alloc(struct mm_struct *mm)
84{
85 pgd_t *ret;
86
87 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
88 return ret;
89}
90
91void pgd_free(pgd_t *pgd)
92{
93 free_pages((unsigned long)pgd, PGDIR_ORDER);
94}
95
96pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
97{
98 pte_t *pte;
99 extern int mem_init_done;
100 extern void *early_get_page(void);
101
102 if (mem_init_done) {
103 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
104 } else {
105 pte = (pte_t *)early_get_page();
106 if (pte)
107 clear_page(pte);
108 }
109 return pte;
110}
111
112struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
113{
114 struct page *ptepage;
115
116#ifdef CONFIG_HIGHPTE
117 gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
118#else
119 gfp_t flags = GFP_KERNEL | __GFP_REPEAT;
120#endif
121
122 ptepage = alloc_pages(flags, 0);
123 if (ptepage)
124 clear_highpage(ptepage);
125 return ptepage;
126}
127
128void pte_free_kernel(pte_t *pte)
129{
130#ifdef CONFIG_SMP
131 hash_page_sync();
132#endif
133 free_page((unsigned long)pte);
134}
135
136void pte_free(struct page *ptepage)
137{
138#ifdef CONFIG_SMP
139 hash_page_sync();
140#endif
141 __free_page(ptepage);
142}
143
144#ifndef CONFIG_PHYS_64BIT
145void __iomem *
146ioremap(phys_addr_t addr, unsigned long size)
147{
148 return __ioremap(addr, size, _PAGE_NO_CACHE);
149}
150#else /* CONFIG_PHYS_64BIT */
151void __iomem *
152ioremap64(unsigned long long addr, unsigned long size)
153{
154 return __ioremap(addr, size, _PAGE_NO_CACHE);
155}
156
157void __iomem *
158ioremap(phys_addr_t addr, unsigned long size)
159{
160 phys_addr_t addr64 = fixup_bigphys_addr(addr, size);
161
162 return ioremap64(addr64, size);
163}
164#endif /* CONFIG_PHYS_64BIT */
165
166void __iomem *
167__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
168{
169 unsigned long v, i;
170 phys_addr_t p;
171 int err;
172
173 /*
174 * Choose an address to map it to.
175 * Once the vmalloc system is running, we use it.
176 * Before then, we use space going down from ioremap_base
177 * (ioremap_bot records where we're up to).
178 */
179 p = addr & PAGE_MASK;
180 size = PAGE_ALIGN(addr + size) - p;
181
182 /*
183 * If the address lies within the first 16 MB, assume it's in ISA
184 * memory space
185 */
186 if (p < 16*1024*1024)
187 p += _ISA_MEM_BASE;
188
189 /*
190 * Don't allow anybody to remap normal RAM that we're using.
191 * mem_init() sets high_memory so only do the check after that.
192 */
193 if (mem_init_done && (p < virt_to_phys(high_memory))) {
194 printk("__ioremap(): phys addr "PHYS_FMT" is RAM lr %p\n", p,
195 __builtin_return_address(0));
196 return NULL;
197 }
198
199 if (size == 0)
200 return NULL;
201
202 /*
203 * Is it already mapped? Perhaps overlapped by a previous
204 * BAT mapping. If the whole area is mapped then we're done,
205 * otherwise remap it since we want to keep the virt addrs for
206 * each request contiguous.
207 *
208 * We make the assumption here that if the bottom and top
209 * of the range we want are mapped then it's mapped to the
210 * same virt address (and this is contiguous).
211 * -- Cort
212 */
213 if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
214 goto out;
215
216 if ((v = p_mapped_by_tlbcam(p)))
217 goto out;
218
219 if (mem_init_done) {
220 struct vm_struct *area;
221 area = get_vm_area(size, VM_IOREMAP);
222 if (area == 0)
223 return NULL;
224 v = (unsigned long) area->addr;
225 } else {
226 v = (ioremap_bot -= size);
227 }
228
229 if ((flags & _PAGE_PRESENT) == 0)
230 flags |= _PAGE_KERNEL;
231 if (flags & _PAGE_NO_CACHE)
232 flags |= _PAGE_GUARDED;
233
234 /*
235 * Should check if it is a candidate for a BAT mapping
236 */
237
238 err = 0;
239 for (i = 0; i < size && err == 0; i += PAGE_SIZE)
240 err = map_page(v+i, p+i, flags);
241 if (err) {
242 if (mem_init_done)
243 vunmap((void *)v);
244 return NULL;
245 }
246
247out:
248 return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
249}
250
251void iounmap(volatile void __iomem *addr)
252{
253 /*
254 * If mapped by BATs then there is nothing to do.
255 * Calling vfree() generates a benign warning.
256 */
257 if (v_mapped_by_bats((unsigned long)addr)) return;
258
259 if (addr > high_memory && (unsigned long) addr < ioremap_bot)
260 vunmap((void *) (PAGE_MASK & (unsigned long)addr));
261}
262
263void __iomem *ioport_map(unsigned long port, unsigned int len)
264{
265 return (void __iomem *) (port + _IO_BASE);
266}
267
268void ioport_unmap(void __iomem *addr)
269{
270 /* Nothing to do */
271}
272EXPORT_SYMBOL(ioport_map);
273EXPORT_SYMBOL(ioport_unmap);
274
275int
276map_page(unsigned long va, phys_addr_t pa, int flags)
277{
278 pmd_t *pd;
279 pte_t *pg;
280 int err = -ENOMEM;
281
282 /* Use upper 10 bits of VA to index the first level map */
283 pd = pmd_offset(pgd_offset_k(va), va);
284 /* Use middle 10 bits of VA to index the second-level map */
285 pg = pte_alloc_kernel(pd, va);
286 if (pg != 0) {
287 err = 0;
288 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
289 if (mem_init_done)
290 flush_HPTE(0, va, pmd_val(*pd));
291 }
292 return err;
293}
294
295/*
296 * Map in all of physical memory starting at KERNELBASE.
297 */
298void __init mapin_ram(void)
299{
300 unsigned long v, p, s, f;
301
302 s = mmu_mapin_ram();
303 v = KERNELBASE + s;
304 p = PPC_MEMSTART + s;
305 for (; s < total_lowmem; s += PAGE_SIZE) {
306 if ((char *) v >= _stext && (char *) v < etext)
307 f = _PAGE_RAM_TEXT;
308 else
309 f = _PAGE_RAM;
310 map_page(v, p, f);
311 v += PAGE_SIZE;
312 p += PAGE_SIZE;
313 }
314}
315
316/* is x a power of 2? */
317#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
318
319/* is x a power of 4? */
320#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1))
321
322/*
323 * Set up a mapping for a block of I/O.
324 * virt, phys, size must all be page-aligned.
325 * This should only be called before ioremap is called.
326 */
327void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
328 unsigned int size, int flags)
329{
330 int i;
331
332 if (virt > KERNELBASE && virt < ioremap_bot)
333 ioremap_bot = ioremap_base = virt;
334
335#ifdef HAVE_BATS
336 /*
337 * Use a BAT for this if possible...
338 */
339 if (io_bat_index < 2 && is_power_of_2(size)
340 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
341 setbat(io_bat_index, virt, phys, size, flags);
342 ++io_bat_index;
343 return;
344 }
345#endif /* HAVE_BATS */
346
347#ifdef HAVE_TLBCAM
348 /*
349 * Use a CAM for this if possible...
350 */
351 if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
352 && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
353 settlbcam(tlbcam_index, virt, phys, size, flags, 0);
354 ++tlbcam_index;
355 return;
356 }
357#endif /* HAVE_TLBCAM */
358
359 /* No BATs available, put it in the page tables. */
360 for (i = 0; i < size; i += PAGE_SIZE)
361 map_page(virt + i, phys + i, flags);
362}
363
364/* Scan the real Linux page tables and return a PTE pointer for
365 * a virtual address in a context.
366 * Returns true (1) if PTE was found, zero otherwise. The pointer to
367 * the PTE pointer is unmodified if PTE is not found.
368 */
369int
370get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
371{
372 pgd_t *pgd;
373 pmd_t *pmd;
374 pte_t *pte;
375 int retval = 0;
376
377 pgd = pgd_offset(mm, addr & PAGE_MASK);
378 if (pgd) {
379 pmd = pmd_offset(pgd, addr & PAGE_MASK);
380 if (pmd_present(*pmd)) {
381 pte = pte_offset_map(pmd, addr & PAGE_MASK);
382 if (pte) {
383 retval = 1;
384 *ptep = pte;
385 /* XXX caller needs to do pte_unmap, yuck */
386 }
387 }
388 }
389 return(retval);
390}
391
392/* Find physical address for this virtual address. Normally used by
393 * I/O functions, but anyone can call it.
394 */
395unsigned long iopa(unsigned long addr)
396{
397 unsigned long pa;
398
399 /* I don't know why this won't work on PMacs or CHRP. It
400 * appears there is some bug, or there is some implicit
401 * mapping done not properly represented by BATs or in page
402 * tables.......I am actively working on resolving this, but
403 * can't hold up other stuff. -- Dan
404 */
405 pte_t *pte;
406 struct mm_struct *mm;
407
408 /* Check the BATs */
409 pa = v_mapped_by_bats(addr);
410 if (pa)
411 return pa;
412
413 /* Allow mapping of user addresses (within the thread)
414 * for DMA if necessary.
415 */
416 if (addr < TASK_SIZE)
417 mm = current->mm;
418 else
419 mm = &init_mm;
420
421 pa = 0;
422 if (get_pteptr(mm, addr, &pte)) {
423 pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
424 pte_unmap(pte);
425 }
426
427 return(pa);
428}
429
 430/* This will find the virtual address for a physical one....
431 * Swiped from APUS, could be dangerous :-).
432 * This is only a placeholder until I really find a way to make this
433 * work. -- Dan
434 */
435unsigned long
436mm_ptov (unsigned long paddr)
437{
438 unsigned long ret;
439#if 0
440 if (paddr < 16*1024*1024)
441 ret = ZTWO_VADDR(paddr);
442 else {
443 int i;
444
445 for (i = 0; i < kmap_chunk_count;){
446 unsigned long phys = kmap_chunks[i++];
447 unsigned long size = kmap_chunks[i++];
448 unsigned long virt = kmap_chunks[i++];
449 if (paddr >= phys
450 && paddr < (phys + size)){
451 ret = virt + paddr - phys;
452 goto exit;
453 }
454 }
455
456 ret = (unsigned long) __va(paddr);
457 }
458exit:
459#ifdef DEBUGPV
460 printk ("PTOV(%lx)=%lx\n", paddr, ret);
461#endif
462#else
463 ret = (unsigned long)paddr + KERNELBASE;
464#endif
465 return ret;
466}
467
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
new file mode 100644
index 000000000000..b79a78206135
--- /dev/null
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -0,0 +1,347 @@
1/*
2 * This file contains ioremap and related functions for 64-bit machines.
3 *
4 * Derived from arch/ppc64/mm/init.c
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
8 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Paul Mackerras
10 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
11 *
12 * Derived from "arch/i386/mm/init.c"
13 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
14 *
15 * Dave Engebretsen <engebret@us.ibm.com>
16 * Rework for PPC64 port.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/signal.h>
27#include <linux/sched.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/types.h>
32#include <linux/mman.h>
33#include <linux/mm.h>
34#include <linux/swap.h>
35#include <linux/stddef.h>
36#include <linux/vmalloc.h>
37#include <linux/init.h>
38#include <linux/delay.h>
39#include <linux/bootmem.h>
40#include <linux/highmem.h>
41#include <linux/idr.h>
42#include <linux/nodemask.h>
43#include <linux/module.h>
44
45#include <asm/pgalloc.h>
46#include <asm/page.h>
47#include <asm/prom.h>
48#include <asm/lmb.h>
49#include <asm/rtas.h>
50#include <asm/io.h>
51#include <asm/mmu_context.h>
52#include <asm/pgtable.h>
53#include <asm/mmu.h>
54#include <asm/uaccess.h>
55#include <asm/smp.h>
56#include <asm/machdep.h>
57#include <asm/tlb.h>
58#include <asm/eeh.h>
59#include <asm/processor.h>
60#include <asm/mmzone.h>
61#include <asm/cputable.h>
62#include <asm/ppcdebug.h>
63#include <asm/sections.h>
64#include <asm/system.h>
65#include <asm/iommu.h>
66#include <asm/abs_addr.h>
67#include <asm/vdso.h>
68#include <asm/imalloc.h>
69
70unsigned long ioremap_bot = IMALLOC_BASE;
71static unsigned long phbs_io_bot = PHBS_IO_BASE;
72
73#ifdef CONFIG_PPC_ISERIES
74
75void __iomem *ioremap(unsigned long addr, unsigned long size)
76{
77 return (void __iomem *)addr;
78}
79
80extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
81 unsigned long flags)
82{
83 return (void __iomem *)addr;
84}
85
86void iounmap(volatile void __iomem *addr)
87{
88 return;
89}
90
91#else
92
93/*
94 * map_io_page currently only called by __ioremap
95 * map_io_page adds an entry to the ioremap page table
96 * and adds an entry to the HPT, possibly bolting it
97 */
98static int map_io_page(unsigned long ea, unsigned long pa, int flags)
99{
100 pgd_t *pgdp;
101 pud_t *pudp;
102 pmd_t *pmdp;
103 pte_t *ptep;
104 unsigned long vsid;
105
106 if (mem_init_done) {
107 pgdp = pgd_offset_k(ea);
108 pudp = pud_alloc(&init_mm, pgdp, ea);
109 if (!pudp)
110 return -ENOMEM;
111 pmdp = pmd_alloc(&init_mm, pudp, ea);
112 if (!pmdp)
113 return -ENOMEM;
114 ptep = pte_alloc_kernel(pmdp, ea);
115 if (!ptep)
116 return -ENOMEM;
117 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
118 __pgprot(flags)));
119 } else {
120 unsigned long va, vpn, hash, hpteg;
121
122 /*
123 * If the mm subsystem is not fully up, we cannot create a
124 * linux page table entry for this mapping. Simply bolt an
125 * entry in the hardware page table.
126 */
127 vsid = get_kernel_vsid(ea);
128 va = (vsid << 28) | (ea & 0xFFFFFFF);
129 vpn = va >> PAGE_SHIFT;
130
131 hash = hpt_hash(vpn, 0);
132
133 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
134
 135 /* Panic if a pte group is full */
136 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
137 HPTE_V_BOLTED,
138 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
139 == -1) {
140 panic("map_io_page: could not insert mapping");
141 }
142 }
143 return 0;
144}
145
146
147static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
148 unsigned long ea, unsigned long size,
149 unsigned long flags)
150{
151 unsigned long i;
152
153 if ((flags & _PAGE_PRESENT) == 0)
154 flags |= pgprot_val(PAGE_KERNEL);
155
156 for (i = 0; i < size; i += PAGE_SIZE)
157 if (map_io_page(ea+i, pa+i, flags))
158 return NULL;
159
160 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
161}
162
163
164void __iomem *
165ioremap(unsigned long addr, unsigned long size)
166{
167 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
168}
169
170void __iomem * __ioremap(unsigned long addr, unsigned long size,
171 unsigned long flags)
172{
173 unsigned long pa, ea;
174 void __iomem *ret;
175
176 /*
177 * Choose an address to map it to.
178 * Once the imalloc system is running, we use it.
179 * Before that, we map using addresses going
180 * up from ioremap_bot. imalloc will use
181 * the addresses from ioremap_bot through
182 * IMALLOC_END
183 *
184 */
185 pa = addr & PAGE_MASK;
186 size = PAGE_ALIGN(addr + size) - pa;
187
188 if (size == 0)
189 return NULL;
190
191 if (mem_init_done) {
192 struct vm_struct *area;
193 area = im_get_free_area(size);
194 if (area == NULL)
195 return NULL;
196 ea = (unsigned long)(area->addr);
197 ret = __ioremap_com(addr, pa, ea, size, flags);
198 if (!ret)
199 im_free(area->addr);
200 } else {
201 ea = ioremap_bot;
202 ret = __ioremap_com(addr, pa, ea, size, flags);
203 if (ret)
204 ioremap_bot += size;
205 }
206 return ret;
207}
208
209#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
210
211int __ioremap_explicit(unsigned long pa, unsigned long ea,
212 unsigned long size, unsigned long flags)
213{
214 struct vm_struct *area;
215 void __iomem *ret;
216
217 /* For now, require page-aligned values for pa, ea, and size */
218 if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
219 !IS_PAGE_ALIGNED(size)) {
220 printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
221 return 1;
222 }
223
224 if (!mem_init_done) {
225 /* Two things to consider in this case:
226 * 1) No records will be kept (imalloc, etc) that the region
227 * has been remapped
228 * 2) It won't be easy to iounmap() the region later (because
229 * of 1)
230 */
231 ;
232 } else {
233 area = im_get_area(ea, size,
234 IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
235 if (area == NULL) {
236 /* Expected when PHB-dlpar is in play */
237 return 1;
238 }
239 if (ea != (unsigned long) area->addr) {
240 printk(KERN_ERR "unexpected addr return from "
241 "im_get_area\n");
242 return 1;
243 }
244 }
245
246 ret = __ioremap_com(pa, pa, ea, size, flags);
247 if (ret == NULL) {
248 printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
249 return 1;
250 }
251 if (ret != (void *) ea) {
252 printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
253 return 1;
254 }
255
256 return 0;
257}
258
259/*
260 * Unmap an IO region and remove it from imalloc'd list.
261 * Access to IO memory should be serialized by driver.
262 * This code is modeled after vmalloc code - unmap_vm_area()
263 *
264 * XXX what about calls before mem_init_done (ie python_countermeasures())
265 */
266void iounmap(volatile void __iomem *token)
267{
268 void *addr;
269
270 if (!mem_init_done)
271 return;
272
273 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
274
275 im_free(addr);
276}
277
278static int iounmap_subset_regions(unsigned long addr, unsigned long size)
279{
280 struct vm_struct *area;
281
282 /* Check whether subsets of this region exist */
283 area = im_get_area(addr, size, IM_REGION_SUPERSET);
284 if (area == NULL)
285 return 1;
286
287 while (area) {
288 iounmap((void __iomem *) area->addr);
289 area = im_get_area(addr, size,
290 IM_REGION_SUPERSET);
291 }
292
293 return 0;
294}
295
296int iounmap_explicit(volatile void __iomem *start, unsigned long size)
297{
298 struct vm_struct *area;
299 unsigned long addr;
300 int rc;
301
302 addr = (unsigned long __force) start & PAGE_MASK;
303
304 /* Verify that the region either exists or is a subset of an existing
305 * region. In the latter case, split the parent region to create
306 * the exact region
307 */
308 area = im_get_area(addr, size,
309 IM_REGION_EXISTS | IM_REGION_SUBSET);
310 if (area == NULL) {
311 /* Determine whether subset regions exist. If so, unmap */
312 rc = iounmap_subset_regions(addr, size);
313 if (rc) {
314 printk(KERN_ERR
315 "%s() cannot unmap nonexistent range 0x%lx\n",
316 __FUNCTION__, addr);
317 return 1;
318 }
319 } else {
320 iounmap((void __iomem *) area->addr);
321 }
322 /*
323 * FIXME! This can't be right:
324 iounmap(area->addr);
325 * Maybe it should be "iounmap(area);"
326 */
327 return 0;
328}
329
330#endif
331
332EXPORT_SYMBOL(ioremap);
333EXPORT_SYMBOL(__ioremap);
334EXPORT_SYMBOL(iounmap);
335
336void __iomem * reserve_phb_iospace(unsigned long size)
337{
338 void __iomem *virt_addr;
339
340 if (phbs_io_bot >= IMALLOC_BASE)
341 panic("reserve_phb_iospace(): phb io space overflow\n");
342
343 virt_addr = (void __iomem *) phbs_io_bot;
344 phbs_io_bot += size;
345
346 return virt_addr;
347}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
new file mode 100644
index 000000000000..cef9e83cc7e9
--- /dev/null
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -0,0 +1,285 @@
1/*
2 * This file contains the routines for handling the MMU on those
3 * PowerPC implementations where the MMU substantially follows the
4 * architecture specification. This includes the 6xx, 7xx, 7xxx,
5 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31
32#include <asm/prom.h>
33#include <asm/mmu.h>
34#include <asm/machdep.h>
35#include <asm/lmb.h>
36
37#include "mmu_decl.h"
38
39PTE *Hash, *Hash_end;
40unsigned long Hash_size, Hash_mask;
41unsigned long _SDR1;
42
43union ubat { /* BAT register values to be loaded */
44 BAT bat;
45#ifdef CONFIG_PPC64BRIDGE
46 u64 word[2];
47#else
48 u32 word[2];
49#endif
50} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
51
52struct batrange { /* stores address ranges mapped by BATs */
53 unsigned long start;
54 unsigned long limit;
55 unsigned long phys;
56} bat_addrs[4];
57
58/*
59 * Return PA for this VA if it is mapped by a BAT, or 0
60 */
61unsigned long v_mapped_by_bats(unsigned long va)
62{
63 int b;
64 for (b = 0; b < 4; ++b)
65 if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
66 return bat_addrs[b].phys + (va - bat_addrs[b].start);
67 return 0;
68}
69
70/*
71 * Return VA for a given PA or 0 if not mapped
72 */
73unsigned long p_mapped_by_bats(unsigned long pa)
74{
75 int b;
76 for (b = 0; b < 4; ++b)
77 if (pa >= bat_addrs[b].phys
78 && pa < (bat_addrs[b].limit-bat_addrs[b].start)
79 +bat_addrs[b].phys)
80 return bat_addrs[b].start+(pa-bat_addrs[b].phys);
81 return 0;
82}
83
84unsigned long __init mmu_mapin_ram(void)
85{
86#ifdef CONFIG_POWER4
87 return 0;
88#else
89 unsigned long tot, bl, done;
90 unsigned long max_size = (256<<20);
91 unsigned long align;
92
93 if (__map_without_bats)
94 return 0;
95
96 /* Set up BAT2 and if necessary BAT3 to cover RAM. */
97
98 /* Make sure we don't map a block larger than the
99 smallest alignment of the physical address. */
100 /* alignment of PPC_MEMSTART */
101 align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
102 /* set BAT block size to MIN(max_size, align) */
103 if (align && align < max_size)
104 max_size = align;
105
106 tot = total_lowmem;
107 for (bl = 128<<10; bl < max_size; bl <<= 1) {
108 if (bl * 2 > tot)
109 break;
110 }
111
112 setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
113 done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
114 if ((done < tot) && !bat_addrs[3].limit) {
115 /* use BAT3 to cover a bit more */
116 tot -= done;
117 for (bl = 128<<10; bl < max_size; bl <<= 1)
118 if (bl * 2 > tot)
119 break;
120 setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
121 done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
122 }
123
124 return done;
125#endif
126}
127
128/*
129 * Set up one of the I/D BAT (block address translation) register pairs.
130 * The parameters are not checked; in particular size must be a power
131 * of 2 between 128k and 256M.
132 */
133void __init setbat(int index, unsigned long virt, unsigned long phys,
134 unsigned int size, int flags)
135{
136 unsigned int bl;
137 int wimgxpp;
138 union ubat *bat = BATS[index];
139
140 if (((flags & _PAGE_NO_CACHE) == 0) &&
141 cpu_has_feature(CPU_FTR_NEED_COHERENT))
142 flags |= _PAGE_COHERENT;
143
144 bl = (size >> 17) - 1;
145 if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
146 /* 603, 604, etc. */
147 /* Do DBAT first */
148 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
149 | _PAGE_COHERENT | _PAGE_GUARDED);
150 wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
151 bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
152 bat[1].word[1] = phys | wimgxpp;
153#ifndef CONFIG_KGDB /* want user access for breakpoints */
154 if (flags & _PAGE_USER)
155#endif
156 bat[1].bat.batu.vp = 1;
157 if (flags & _PAGE_GUARDED) {
158 /* G bit must be zero in IBATs */
159 bat[0].word[0] = bat[0].word[1] = 0;
160 } else {
161 /* make IBAT same as DBAT */
162 bat[0] = bat[1];
163 }
164 } else {
165 /* 601 cpu */
166 if (bl > BL_8M)
167 bl = BL_8M;
168 wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
169 | _PAGE_COHERENT);
170 wimgxpp |= (flags & _PAGE_RW)?
171 ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
172 bat->word[0] = virt | wimgxpp | 4; /* Ks=0, Ku=1 */
173 bat->word[1] = phys | bl | 0x40; /* V=1 */
174 }
175
176 bat_addrs[index].start = virt;
177 bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
178 bat_addrs[index].phys = phys;
179}
180
181/*
182 * Initialize the hash table and patch the instructions in hashtable.S.
183 */
184void __init MMU_init_hw(void)
185{
186 unsigned int hmask, mb, mb2;
187 unsigned int n_hpteg, lg_n_hpteg;
188
189 extern unsigned int hash_page_patch_A[];
190 extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
191 extern unsigned int hash_page[];
192 extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
193
194 if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
195 /*
196 * Put a blr (procedure return) instruction at the
197 * start of hash_page, since we can still get DSI
198 * exceptions on a 603.
199 */
200 hash_page[0] = 0x4e800020;
201 flush_icache_range((unsigned long) &hash_page[0],
202 (unsigned long) &hash_page[1]);
203 return;
204 }
205
206 if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
207
208#ifdef CONFIG_PPC64BRIDGE
209#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
210#define SDR1_LOW_BITS (lg_n_hpteg - 11)
211#define MIN_N_HPTEG 2048 /* min 256kB hash table */
212#else
213#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
214#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
215#define MIN_N_HPTEG 1024 /* min 64kB hash table */
216#endif
217
218 /*
219 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
220 * This is less than the recommended amount, but then
221 * Linux ain't AIX.
222 */
223 n_hpteg = total_memory / (PAGE_SIZE * 8);
224 if (n_hpteg < MIN_N_HPTEG)
225 n_hpteg = MIN_N_HPTEG;
226 lg_n_hpteg = __ilog2(n_hpteg);
227 if (n_hpteg & (n_hpteg - 1)) {
228 ++lg_n_hpteg; /* round up if not power of 2 */
229 n_hpteg = 1 << lg_n_hpteg;
230 }
231 Hash_size = n_hpteg << LG_HPTEG_SIZE;
232
233 /*
234 * Find some memory for the hash table.
235 */
236 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
237 Hash = __va(lmb_alloc_base(Hash_size, Hash_size,
238 __initial_memory_limit));
239 cacheable_memzero(Hash, Hash_size);
240 _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
241
242 Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
243
244 printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
245 total_memory >> 20, Hash_size >> 10, Hash);
246
247
248 /*
249 * Patch up the instructions in hashtable.S:create_hpte
250 */
251 if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
252 Hash_mask = n_hpteg - 1;
253 hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
254 mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
255 if (lg_n_hpteg > 16)
256 mb2 = 16 - LG_HPTEG_SIZE;
257
258 hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
259 | ((unsigned int)(Hash) >> 16);
260 hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
261 hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
262 hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
263 hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
264
265 /*
266 * Ensure that the locations we've patched have been written
267 * out from the data cache and invalidated in the instruction
268 * cache, on those machines with split caches.
269 */
270 flush_icache_range((unsigned long) &hash_page_patch_A[0],
271 (unsigned long) &hash_page_patch_C[1]);
272
273 /*
274 * Patch up the instructions in hashtable.S:flush_hash_page
275 */
276 flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
277 | ((unsigned int)(Hash) >> 16);
278 flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
279 flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
280 flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
281 flush_icache_range((unsigned long) &flush_hash_patch_A[0],
282 (unsigned long) &flush_hash_patch_B[1]);
283
284 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
285}
diff --git a/arch/ppc64/mm/slb.c b/arch/powerpc/mm/slb.c
index 0473953f6a37..0473953f6a37 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
diff --git a/arch/ppc64/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a3a03da503bc..a3a03da503bc 100644
--- a/arch/ppc64/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
diff --git a/arch/ppc64/mm/stab.c b/arch/powerpc/mm/stab.c
index 1b83f002bf27..1b83f002bf27 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
new file mode 100644
index 000000000000..6c3dc3c44c86
--- /dev/null
+++ b/arch/powerpc/mm/tlb_32.c
@@ -0,0 +1,183 @@
1/*
2 * This file contains the routines for TLB flushing.
3 * On machines where the MMU uses a hash table to store virtual to
4 * physical translations, these routines flush entries from the
5 * hash table also.
6 * -- paulus
7 *
8 * Derived from arch/ppc/mm/init.c:
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
12 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
13 * Copyright (C) 1996 Paul Mackerras
14 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
15 *
16 * Derived from "arch/i386/mm/init.c"
17 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 *
24 */
25
26#include <linux/config.h>
27#include <linux/kernel.h>
28#include <linux/mm.h>
29#include <linux/init.h>
30#include <linux/highmem.h>
31#include <asm/tlbflush.h>
32#include <asm/tlb.h>
33
34#include "mmu_decl.h"
35
36/*
37 * Called when unmapping pages to flush entries from the TLB/hash table.
38 */
39void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
40{
41 unsigned long ptephys;
42
43 if (Hash != 0) {
44 ptephys = __pa(ptep) & PAGE_MASK;
45 flush_hash_pages(mm->context, addr, ptephys, 1);
46 }
47}
48
49/*
50 * Called by ptep_set_access_flags, must flush on CPUs for which the
51 * DSI handler can't just "fixup" the TLB on a write fault
52 */
53void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
54{
55 if (Hash != 0)
56 return;
57 _tlbie(addr);
58}
59
60/*
61 * Called at the end of a mmu_gather operation to make sure the
62 * TLB flush is completely done.
63 */
64void tlb_flush(struct mmu_gather *tlb)
65{
66 if (Hash == 0) {
67 /*
68 * 603 needs to flush the whole TLB here since
69 * it doesn't use a hash table.
70 */
71 _tlbia();
72 }
73}
74
75/*
76 * TLB flushing:
77 *
78 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
79 * - flush_tlb_page(vma, vmaddr) flushes one page
80 * - flush_tlb_range(vma, start, end) flushes a range of pages
81 * - flush_tlb_kernel_range(start, end) flushes kernel pages
82 *
83 * since the hardware hash table functions as an extension of the
84 * tlb as far as the linux tables are concerned, flush it too.
85 * -- Cort
86 */
87
88/*
89 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
90 * the cache operations on the bus. Hence we need to use an IPI
91 * to get the other CPU(s) to invalidate their TLBs.
92 */
93#ifdef CONFIG_SMP_750
94#define FINISH_FLUSH smp_send_tlb_invalidate(0)
95#else
96#define FINISH_FLUSH do { } while (0)
97#endif
98
99static void flush_range(struct mm_struct *mm, unsigned long start,
100 unsigned long end)
101{
102 pmd_t *pmd;
103 unsigned long pmd_end;
104 int count;
105 unsigned int ctx = mm->context;
106
107 if (Hash == 0) {
108 _tlbia();
109 return;
110 }
111 start &= PAGE_MASK;
112 if (start >= end)
113 return;
114 end = (end - 1) | ~PAGE_MASK;
115 pmd = pmd_offset(pgd_offset(mm, start), start);
116 for (;;) {
117 pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
118 if (pmd_end > end)
119 pmd_end = end;
120 if (!pmd_none(*pmd)) {
121 count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
122 flush_hash_pages(ctx, start, pmd_val(*pmd), count);
123 }
124 if (pmd_end == end)
125 break;
126 start = pmd_end + 1;
127 ++pmd;
128 }
129}
130
131/*
132 * Flush kernel TLB entries in the given range
133 */
134void flush_tlb_kernel_range(unsigned long start, unsigned long end)
135{
136 flush_range(&init_mm, start, end);
137 FINISH_FLUSH;
138}
139
140/*
141 * Flush all the (user) entries for the address space described by mm.
142 */
143void flush_tlb_mm(struct mm_struct *mm)
144{
145 struct vm_area_struct *mp;
146
147 if (Hash == 0) {
148 _tlbia();
149 return;
150 }
151
152 for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
153 flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
154 FINISH_FLUSH;
155}
156
157void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
158{
159 struct mm_struct *mm;
160 pmd_t *pmd;
161
162 if (Hash == 0) {
163 _tlbie(vmaddr);
164 return;
165 }
166 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
167 pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
168 if (!pmd_none(*pmd))
169 flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
170 FINISH_FLUSH;
171}
172
173/*
174 * For each address in the range, find the pte for the address
175 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
176 * the corresponding HPTE.
177 */
178void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
179 unsigned long end)
180{
181 flush_range(vma->vm_mm, start, end);
182 FINISH_FLUSH;
183}
diff --git a/arch/ppc64/mm/tlb.c b/arch/powerpc/mm/tlb_64.c
index 21fbffb23a43..09ab81a10f4f 100644
--- a/arch/ppc64/mm/tlb.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -128,12 +128,10 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
128void hpte_update(struct mm_struct *mm, unsigned long addr, 128void hpte_update(struct mm_struct *mm, unsigned long addr,
129 unsigned long pte, int wrprot) 129 unsigned long pte, int wrprot)
130{ 130{
131 int i;
132 unsigned long context = 0;
133 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 131 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
132 unsigned long vsid;
133 int i;
134 134
135 if (REGION_ID(addr) == USER_REGION_ID)
136 context = mm->context.id;
137 i = batch->index; 135 i = batch->index;
138 136
139 /* 137 /*
@@ -143,19 +141,21 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
143 * up scanning and resetting referenced bits then our batch context 141 * up scanning and resetting referenced bits then our batch context
144 * will change mid stream. 142 * will change mid stream.
145 */ 143 */
146 if (i != 0 && (context != batch->context || 144 if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
147 batch->large != pte_huge(pte))) {
148 flush_tlb_pending(); 145 flush_tlb_pending();
149 i = 0; 146 i = 0;
150 } 147 }
151
152 if (i == 0) { 148 if (i == 0) {
153 batch->context = context;
154 batch->mm = mm; 149 batch->mm = mm;
155 batch->large = pte_huge(pte); 150 batch->large = pte_huge(pte);
156 } 151 }
152 if (addr < KERNELBASE) {
153 vsid = get_vsid(mm->context.id, addr);
154 WARN_ON(vsid == 0);
155 } else
156 vsid = get_kernel_vsid(addr);
157 batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
157 batch->pte[i] = __pte(pte); 158 batch->pte[i] = __pte(pte);
158 batch->addr[i] = addr;
159 batch->index = ++i; 159 batch->index = ++i;
160 if (i >= PPC64_TLB_BATCH_NR) 160 if (i >= PPC64_TLB_BATCH_NR)
161 flush_tlb_pending(); 161 flush_tlb_pending();
@@ -177,10 +177,9 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
177 local = 1; 177 local = 1;
178 178
179 if (i == 1) 179 if (i == 1)
180 flush_hash_page(batch->context, batch->addr[0], batch->pte[0], 180 flush_hash_page(batch->vaddr[0], batch->pte[0], local);
181 local);
182 else 181 else
183 flush_hash_range(batch->context, i, local); 182 flush_hash_range(i, local);
184 batch->index = 0; 183 batch->index = 0;
185 put_cpu(); 184 put_cpu();
186} 185}
diff --git a/arch/ppc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index 19d37730b664..19d37730b664 100644
--- a/arch/ppc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
diff --git a/arch/ppc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index e2218d32a4eb..0782d0cca89c 100644
--- a/arch/ppc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -7,8 +7,5 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
7 timer_int.o ) 7 timer_int.o )
8 8
9oprofile-y := $(DRIVER_OBJS) common.o 9oprofile-y := $(DRIVER_OBJS) common.o
10 10oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
11ifeq ($(CONFIG_FSL_BOOKE),y) 11oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
12 oprofile-y += op_model_fsl_booke.o
13endif
14
diff --git a/arch/ppc64/oprofile/common.c b/arch/powerpc/oprofile/common.c
index e5f572710aa0..af2c05d20ba5 100644
--- a/arch/ppc64/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -1,5 +1,9 @@
1/* 1/*
2 * PPC 64 oprofile support:
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM 3 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
4 * PPC 32 oprofile support: (based on PPC 64 support)
5 * Copyright (C) Freescale Semiconductor, Inc 2004
6 * Author: Andy Fleming
3 * 7 *
4 * Based on alpha version. 8 * Based on alpha version.
5 * 9 *
@@ -10,6 +14,9 @@
10 */ 14 */
11 15
12#include <linux/oprofile.h> 16#include <linux/oprofile.h>
17#ifndef __powerpc64__
18#include <linux/slab.h>
19#endif /* ! __powerpc64__ */
13#include <linux/init.h> 20#include <linux/init.h>
14#include <linux/smp.h> 21#include <linux/smp.h>
15#include <linux/errno.h> 22#include <linux/errno.h>
@@ -19,17 +26,21 @@
19#include <asm/cputable.h> 26#include <asm/cputable.h>
20#include <asm/oprofile_impl.h> 27#include <asm/oprofile_impl.h>
21 28
22static struct op_ppc64_model *model; 29static struct op_powerpc_model *model;
23 30
24static struct op_counter_config ctr[OP_MAX_COUNTER]; 31static struct op_counter_config ctr[OP_MAX_COUNTER];
25static struct op_system_config sys; 32static struct op_system_config sys;
26 33
34#ifndef __powerpc64__
35static char *cpu_type;
36#endif /* ! __powerpc64__ */
37
27static void op_handle_interrupt(struct pt_regs *regs) 38static void op_handle_interrupt(struct pt_regs *regs)
28{ 39{
29 model->handle_interrupt(regs, ctr); 40 model->handle_interrupt(regs, ctr);
30} 41}
31 42
32static int op_ppc64_setup(void) 43static int op_powerpc_setup(void)
33{ 44{
34 int err; 45 int err;
35 46
@@ -42,41 +53,49 @@ static int op_ppc64_setup(void)
42 model->reg_setup(ctr, &sys, model->num_counters); 53 model->reg_setup(ctr, &sys, model->num_counters);
43 54
44 /* Configure the registers on all cpus. */ 55 /* Configure the registers on all cpus. */
56#ifdef __powerpc64__
45 on_each_cpu(model->cpu_setup, NULL, 0, 1); 57 on_each_cpu(model->cpu_setup, NULL, 0, 1);
58#else /* __powerpc64__ */
59#if 0
60 /* FIXME: Make multi-cpu work */
61 on_each_cpu(model->reg_setup, NULL, 0, 1);
62#endif
63#endif /* __powerpc64__ */
46 64
47 return 0; 65 return 0;
48} 66}
49 67
50static void op_ppc64_shutdown(void) 68static void op_powerpc_shutdown(void)
51{ 69{
52 release_pmc_hardware(); 70 release_pmc_hardware();
53} 71}
54 72
55static void op_ppc64_cpu_start(void *dummy) 73static void op_powerpc_cpu_start(void *dummy)
56{ 74{
57 model->start(ctr); 75 model->start(ctr);
58} 76}
59 77
60static int op_ppc64_start(void) 78static int op_powerpc_start(void)
61{ 79{
62 on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1); 80 on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
63 return 0; 81 return 0;
64} 82}
65 83
66static inline void op_ppc64_cpu_stop(void *dummy) 84static inline void op_powerpc_cpu_stop(void *dummy)
67{ 85{
68 model->stop(); 86 model->stop();
69} 87}
70 88
71static void op_ppc64_stop(void) 89static void op_powerpc_stop(void)
72{ 90{
73 on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1); 91 on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
74} 92}
75 93
76static int op_ppc64_create_files(struct super_block *sb, struct dentry *root) 94static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
77{ 95{
78 int i; 96 int i;
79 97
98#ifdef __powerpc64__
80 /* 99 /*
81 * There is one mmcr0, mmcr1 and mmcra for setting the events for 100 * There is one mmcr0, mmcr1 and mmcra for setting the events for
82 * all of the counters. 101 * all of the counters.
@@ -84,6 +103,7 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
84 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 103 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
85 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 104 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
86 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 105 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
106#endif /* __powerpc64__ */
87 107
88 for (i = 0; i < model->num_counters; ++i) { 108 for (i = 0; i < model->num_counters; ++i) {
89 struct dentry *dir; 109 struct dentry *dir;
@@ -95,44 +115,70 @@ static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
95 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 115 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
96 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 116 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
97 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 117 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
118#ifdef __powerpc64__
98 /* 119 /*
99 * We dont support per counter user/kernel selection, but 120 * We dont support per counter user/kernel selection, but
100 * we leave the entries because userspace expects them 121 * we leave the entries because userspace expects them
101 */ 122 */
123#endif /* __powerpc64__ */
102 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 124 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
103 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 125 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
126
127#ifndef __powerpc64__
128 /* FIXME: Not sure if this is used */
129#endif /* ! __powerpc64__ */
104 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 130 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
105 } 131 }
106 132
107 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); 133 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
108 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); 134 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
135#ifdef __powerpc64__
109 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks", 136 oprofilefs_create_ulong(sb, root, "backtrace_spinlocks",
110 &sys.backtrace_spinlocks); 137 &sys.backtrace_spinlocks);
138#endif /* __powerpc64__ */
111 139
112 /* Default to tracing both kernel and user */ 140 /* Default to tracing both kernel and user */
113 sys.enable_kernel = 1; 141 sys.enable_kernel = 1;
114 sys.enable_user = 1; 142 sys.enable_user = 1;
115 143#ifdef __powerpc64__
116 /* Turn on backtracing through spinlocks by default */ 144 /* Turn on backtracing through spinlocks by default */
117 sys.backtrace_spinlocks = 1; 145 sys.backtrace_spinlocks = 1;
146#endif /* __powerpc64__ */
118 147
119 return 0; 148 return 0;
120} 149}
121 150
122int __init oprofile_arch_init(struct oprofile_operations *ops) 151int __init oprofile_arch_init(struct oprofile_operations *ops)
123{ 152{
153#ifndef __powerpc64__
154#ifdef CONFIG_FSL_BOOKE
155 model = &op_model_fsl_booke;
156#else
157 return -ENODEV;
158#endif
159
160 cpu_type = kmalloc(32, GFP_KERNEL);
161 if (NULL == cpu_type)
162 return -ENOMEM;
163
164 sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name);
165
166 model->num_counters = cur_cpu_spec->num_pmcs;
167
168 ops->cpu_type = cpu_type;
169#else /* __powerpc64__ */
124 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type) 170 if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type)
125 return -ENODEV; 171 return -ENODEV;
126
127 model = cur_cpu_spec->oprofile_model; 172 model = cur_cpu_spec->oprofile_model;
128 model->num_counters = cur_cpu_spec->num_pmcs; 173 model->num_counters = cur_cpu_spec->num_pmcs;
129 174
130 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; 175 ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
131 ops->create_files = op_ppc64_create_files; 176#endif /* __powerpc64__ */
132 ops->setup = op_ppc64_setup; 177 ops->create_files = op_powerpc_create_files;
133 ops->shutdown = op_ppc64_shutdown; 178 ops->setup = op_powerpc_setup;
134 ops->start = op_ppc64_start; 179 ops->shutdown = op_powerpc_shutdown;
135 ops->stop = op_ppc64_stop; 180 ops->start = op_powerpc_start;
181 ops->stop = op_powerpc_stop;
136 182
137 printk(KERN_INFO "oprofile: using %s performance monitoring.\n", 183 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
138 ops->cpu_type); 184 ops->cpu_type);
@@ -142,4 +188,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
142 188
143void oprofile_arch_exit(void) 189void oprofile_arch_exit(void)
144{ 190{
191#ifndef __powerpc64__
192 kfree(cpu_type);
193 cpu_type = NULL;
194#endif /* ! __powerpc64__ */
145} 195}
diff --git a/arch/ppc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c
index fc9c859358c6..86124a94c9af 100644
--- a/arch/ppc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_booke.c
@@ -24,9 +24,8 @@
24#include <asm/cputable.h> 24#include <asm/cputable.h>
25#include <asm/reg_booke.h> 25#include <asm/reg_booke.h>
26#include <asm/page.h> 26#include <asm/page.h>
27#include <asm/perfmon.h> 27#include <asm/pmc.h>
28 28#include <asm/oprofile_impl.h>
29#include "op_impl.h"
30 29
31static unsigned long reset_value[OP_MAX_COUNTER]; 30static unsigned long reset_value[OP_MAX_COUNTER];
32 31
@@ -176,7 +175,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
176 pmc_start_ctrs(1); 175 pmc_start_ctrs(1);
177} 176}
178 177
179struct op_ppc32_model op_model_fsl_booke = { 178struct op_powerpc_model op_model_fsl_booke = {
180 .reg_setup = fsl_booke_reg_setup, 179 .reg_setup = fsl_booke_reg_setup,
181 .start = fsl_booke_start, 180 .start = fsl_booke_start,
182 .stop = fsl_booke_stop, 181 .stop = fsl_booke_stop,
diff --git a/arch/ppc64/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 32b2bb5625fe..886449315847 100644
--- a/arch/ppc64/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -300,7 +300,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
300 mtspr(SPRN_MMCR0, mmcr0); 300 mtspr(SPRN_MMCR0, mmcr0);
301} 301}
302 302
303struct op_ppc64_model op_model_power4 = { 303struct op_powerpc_model op_model_power4 = {
304 .reg_setup = power4_reg_setup, 304 .reg_setup = power4_reg_setup,
305 .cpu_setup = power4_cpu_setup, 305 .cpu_setup = power4_cpu_setup,
306 .start = power4_start, 306 .start = power4_start,
diff --git a/arch/ppc64/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c
index 08c5b333f5c4..e010b85996e8 100644
--- a/arch/ppc64/oprofile/op_model_rs64.c
+++ b/arch/powerpc/oprofile/op_model_rs64.c
@@ -209,7 +209,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs,
209 mtspr(SPRN_MMCR0, mmcr0); 209 mtspr(SPRN_MMCR0, mmcr0);
210} 210}
211 211
212struct op_ppc64_model op_model_rs64 = { 212struct op_powerpc_model op_model_rs64 = {
213 .reg_setup = rs64_reg_setup, 213 .reg_setup = rs64_reg_setup,
214 .cpu_setup = rs64_cpu_setup, 214 .cpu_setup = rs64_cpu_setup,
215 .start = rs64_start, 215 .start = rs64_start,
diff --git a/arch/powerpc/platforms/4xx/Kconfig b/arch/powerpc/platforms/4xx/Kconfig
new file mode 100644
index 000000000000..ed39d6a3d22a
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Kconfig
@@ -0,0 +1,280 @@
1config 4xx
2 bool
3 depends on 40x || 44x
4 default y
5
6config WANT_EARLY_SERIAL
7 bool
8 select SERIAL_8250
9 default n
10
11menu "AMCC 4xx options"
12 depends on 4xx
13
14choice
15 prompt "Machine Type"
16 depends on 40x
17 default WALNUT
18
19config BUBINGA
20 bool "Bubinga"
21 select WANT_EARLY_SERIAL
22 help
23 This option enables support for the IBM 405EP evaluation board.
24
25config CPCI405
26 bool "CPCI405"
27 help
28 This option enables support for the CPCI405 board.
29
30config EP405
31 bool "EP405/EP405PC"
32 help
33 This option enables support for the EP405/EP405PC boards.
34
35config REDWOOD_5
36 bool "Redwood-5"
37 help
38 This option enables support for the IBM STB04 evaluation board.
39
40config REDWOOD_6
41 bool "Redwood-6"
42 help
43 This option enables support for the IBM STBx25xx evaluation board.
44
45config SYCAMORE
46 bool "Sycamore"
47 help
48 This option enables support for the IBM PPC405GPr evaluation board.
49
50config WALNUT
51 bool "Walnut"
52 help
53 This option enables support for the IBM PPC405GP evaluation board.
54
55config XILINX_ML300
56 bool "Xilinx-ML300"
57 help
58 This option enables support for the Xilinx ML300 evaluation board.
59
60endchoice
61
62choice
63 prompt "Machine Type"
64 depends on 44x
65 default EBONY
66
67config BAMBOO
68 bool "Bamboo"
69 select WANT_EARLY_SERIAL
70 help
71 This option enables support for the IBM PPC440EP evaluation board.
72
73config EBONY
74 bool "Ebony"
75 select WANT_EARLY_SERIAL
76 help
77 This option enables support for the IBM PPC440GP evaluation board.
78
79config LUAN
80 bool "Luan"
81 select WANT_EARLY_SERIAL
82 help
83 This option enables support for the IBM PPC440SP evaluation board.
84
85config OCOTEA
86 bool "Ocotea"
87 select WANT_EARLY_SERIAL
88 help
89 This option enables support for the IBM PPC440GX evaluation board.
90
91endchoice
92
93config EP405PC
94 bool "EP405PC Support"
95 depends on EP405
96
97
98# It's often necessary to know the specific 4xx processor type.
 99# Fortunately, it is implied (so far) from the board type, so we
100# don't need to ask more redundant questions.
101config NP405H
102 bool
103 depends on ASH
104 default y
105
106config 440EP
107 bool
108 depends on BAMBOO
109 select PPC_FPU
110 default y
111
112config 440GP
113 bool
114 depends on EBONY
115 default y
116
117config 440GX
118 bool
119 depends on OCOTEA
120 default y
121
122config 440SP
123 bool
124 depends on LUAN
125 default y
126
127config 440
128 bool
129 depends on 440GP || 440SP || 440EP
130 default y
131
132config 440A
133 bool
134 depends on 440GX
135 default y
136
137config IBM440EP_ERR42
138 bool
139 depends on 440EP
140 default y
141
142# All 405-based cores up until the 405GPR and 405EP have this errata.
143config IBM405_ERR77
144 bool
145 depends on 40x && !403GCX && !405GPR && !405EP
146 default y
147
148# All 40x-based cores, up until the 405GPR and 405EP have this errata.
149config IBM405_ERR51
150 bool
151 depends on 40x && !405GPR && !405EP
152 default y
153
154config BOOKE
155 bool
156 depends on 44x
157 default y
158
159config IBM_OCP
160 bool
161 depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
162 default y
163
164config XILINX_OCP
165 bool
166 depends on XILINX_ML300
167 default y
168
169config IBM_EMAC4
170 bool
171 depends on 440GX || 440SP
172 default y
173
174config BIOS_FIXUP
175 bool
176 depends on BUBINGA || EP405 || SYCAMORE || WALNUT
177 default y
178
179# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
180config 403GCX
181 bool
182 depends on OAK
183 default y
184
185config 405EP
186 bool
187 depends on BUBINGA
188 default y
189
190config 405GP
191 bool
192 depends on CPCI405 || EP405 || WALNUT
193 default y
194
195config 405GPR
196 bool
197 depends on SYCAMORE
198 default y
199
200config VIRTEX_II_PRO
201 bool
202 depends on XILINX_ML300
203 default y
204
205config STB03xxx
206 bool
207 depends on REDWOOD_5 || REDWOOD_6
208 default y
209
210config EMBEDDEDBOOT
211 bool
212 depends on EP405 || XILINX_ML300
213 default y
214
215config IBM_OPENBIOS
216 bool
217 depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
218 default y
219
220config PPC4xx_DMA
221 bool "PPC4xx DMA controller support"
222 depends on 4xx
223
224config PPC4xx_EDMA
225 bool
226 depends on !STB03xxx && PPC4xx_DMA
227 default y
228
229config PPC_GEN550
230 bool
231 depends on 4xx
232 default y
233
234choice
235 prompt "TTYS0 device and default console"
236 depends on 40x
237 default UART0_TTYS0
238
239config UART0_TTYS0
240 bool "UART0"
241
242config UART0_TTYS1
243 bool "UART1"
244
245endchoice
246
247config SERIAL_SICC
248 bool "SICC Serial port support"
249 depends on STB03xxx
250
251config UART1_DFLT_CONSOLE
252 bool
253 depends on SERIAL_SICC && UART0_TTYS1
254 default y
255
256config SERIAL_SICC_CONSOLE
257 bool
258 depends on SERIAL_SICC && UART0_TTYS1
259 default y
260endmenu
261
262
263menu "IBM 40x options"
264 depends on 40x
265
266config SERIAL_SICC
267 bool "SICC Serial port"
268 depends on STB03xxx
269
270config UART1_DFLT_CONSOLE
271 bool
272 depends on SERIAL_SICC && UART0_TTYS1
273 default y
274
275config SERIAL_SICC_CONSOLE
276 bool
277 depends on SERIAL_SICC && UART0_TTYS1
278 default y
279
280endmenu
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
new file mode 100644
index 000000000000..79ff6b1e887c
--- /dev/null
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works \ No newline at end of file
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
new file mode 100644
index 000000000000..c5bc2821d991
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -0,0 +1,86 @@
1config 85xx
2 bool
3 depends on E500
4 default y
5
6config PPC_INDIRECT_PCI_BE
7 bool
8 depends on 85xx
9 default y
10
11menu "Freescale 85xx options"
12 depends on E500
13
14choice
15 prompt "Machine Type"
16 depends on 85xx
17 default MPC8540_ADS
18
19config MPC8540_ADS
20 bool "Freescale MPC8540 ADS"
21 help
22 This option enables support for the MPC 8540 ADS evaluation board.
23
24config MPC8548_CDS
25 bool "Freescale MPC8548 CDS"
26 help
27	  This option enables support for the MPC8548 CDS evaluation board.
28
29config MPC8555_CDS
30 bool "Freescale MPC8555 CDS"
31 help
32	  This option enables support for the MPC8555 CDS evaluation board.
33
34config MPC8560_ADS
35 bool "Freescale MPC8560 ADS"
36 help
37 This option enables support for the MPC 8560 ADS evaluation board.
38
39config SBC8560
40 bool "WindRiver PowerQUICC III SBC8560"
41 help
42 This option enables support for the WindRiver PowerQUICC III
43 SBC8560 board.
44
45config STX_GP3
46 bool "Silicon Turnkey Express GP3"
47 help
48 This option enables support for the Silicon Turnkey Express GP3
49 board.
50
51endchoice
52
53# It's often necessary to know the specific 85xx processor type.
54# Fortunately, it is implied (so far) from the board type, so we
55# don't need to ask more redundant questions.
56config MPC8540
57 bool
58 depends on MPC8540_ADS
59 default y
60
61config MPC8548
62 bool
63 depends on MPC8548_CDS
64 default y
65
66config MPC8555
67 bool
68 depends on MPC8555_CDS
69 default y
70
71config MPC8560
72 bool
73 depends on SBC8560 || MPC8560_ADS || STX_GP3
74 default y
75
76config 85xx_PCI2
77	bool "Support for 2nd PCI host controller"
78 depends on MPC8555_CDS
79 default y
80
81config PPC_GEN550
82 bool
83 depends on MPC8540 || SBC8560 || MPC8555
84 default y
85
86endmenu
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
new file mode 100644
index 000000000000..6407197ffd89
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -0,0 +1 @@
# empty makefile so make clean works
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
new file mode 100644
index 000000000000..c8c0ba3cf8e8
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -0,0 +1,352 @@
1config FADS
2 bool
3
4choice
5 prompt "8xx Machine Type"
6 depends on 8xx
7 default RPXLITE
8
9config RPXLITE
10 bool "RPX-Lite"
11 ---help---
12 Single-board computers based around the PowerPC MPC8xx chips and
13 intended for embedded applications. The following types are
14 supported:
15
16 RPX-Lite:
17 Embedded Planet RPX Lite. PC104 form-factor SBC based on the MPC823.
18
19 RPX-Classic:
20 Embedded Planet RPX Classic Low-fat. Credit-card-size SBC based on
21 the MPC 860
22
23 BSE-IP:
24 Bright Star Engineering ip-Engine.
25
26 TQM823L:
27 TQM850L:
28 TQM855L:
29 TQM860L:
30 MPC8xx based family of mini modules, half credit card size,
31 up to 64 MB of RAM, 8 MB Flash, (Fast) Ethernet, 2 x serial ports,
32 2 x CAN bus interface, ...
33 Manufacturer: TQ Components, www.tq-group.de
34 Date of Release: October (?) 1999
35 End of Life: not yet :-)
36 URL:
37 - module: <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>
38 - starter kit: <http://www.denx.de/PDF/STK8xxLHWM201.pdf>
39 - images: <http://www.denx.de/embedded-ppc-en.html>
40
41 FPS850L:
42 FingerPrint Sensor System (based on TQM850L)
43 Manufacturer: IKENDI AG, <http://www.ikendi.com/>
44 Date of Release: November 1999
45 End of life: end 2000 ?
46 URL: see TQM850L
47
48 IVMS8:
49 MPC860 based board used in the "Integrated Voice Mail System",
50 Small Version (8 voice channels)
51 Manufacturer: Speech Design, <http://www.speech-design.de/>
52 Date of Release: December 2000 (?)
53 End of life: -
54 URL: <http://www.speech-design.de/>
55
56 IVML24:
57 MPC860 based board used in the "Integrated Voice Mail System",
58 Large Version (24 voice channels)
59 Manufacturer: Speech Design, <http://www.speech-design.de/>
60 Date of Release: March 2001 (?)
61 End of life: -
62 URL: <http://www.speech-design.de/>
63
64 HERMES:
65 Hermes-Pro ISDN/LAN router with integrated 8 x hub
66 Manufacturer: Multidata Gesellschaft fur Datentechnik und Informatik
67 <http://www.multidata.de/>
68 Date of Release: 2000 (?)
69 End of life: -
70 URL: <http://www.multidata.de/english/products/hpro.htm>
71
72 IP860:
73 VMEBus IP (Industry Pack) carrier board with MPC860
74 Manufacturer: MicroSys GmbH, <http://www.microsys.de/>
75 Date of Release: ?
76 End of life: -
77 URL: <http://www.microsys.de/html/ip860.html>
78
79 PCU_E:
80 PCU = Peripheral Controller Unit, Extended
81 Manufacturer: Siemens AG, ICN (Information and Communication Networks)
82 <http://www.siemens.de/page/1,3771,224315-1-999_2_226207-0,00.html>
83 Date of Release: April 2001
84 End of life: August 2001
85 URL: n. a.
86
87config RPXCLASSIC
88 bool "RPX-Classic"
89 help
90 The RPX-Classic is a single-board computer based on the Motorola
91 MPC860. It features 16MB of DRAM and a variable amount of flash,
92 I2C EEPROM, thermal monitoring, a PCMCIA slot, a DIP switch and two
93 LEDs. Variants with Ethernet ports exist. Say Y here to support it
94 directly.
95
96config BSEIP
97 bool "BSE-IP"
98 help
99 Say Y here to support the Bright Star Engineering ipEngine SBC.
100 This is a credit-card-sized device featuring a MPC823 processor,
101 26MB DRAM, 4MB flash, Ethernet, a 16K-gate FPGA, USB, an LCD/video
102 controller, and two RS232 ports.
103
104config MPC8XXFADS
105 bool "FADS"
106 select FADS
107
108config MPC86XADS
109	bool "MPC86XADS"
	# 'select' moved above 'help': a statement trailing a help block
	# depends on exact indentation to be recognized by the Kconfig
	# parser, so keeping it there risks FADS silently never being
	# selected.  (Matches the FADS/MPC8XXFADS entry style above.)
	select FADS
110	help
111	  MPC86x Application Development System by Freescale Semiconductor.
112	  The MPC86xADS is meant to serve as a platform for s/w and h/w
113	  development around the MPC86X processor families.
115
116config MPC885ADS
117 bool "MPC885ADS"
118 help
119 Freescale Semiconductor MPC885 Application Development System (ADS).
120 Also known as DUET.
121 The MPC885ADS is meant to serve as a platform for s/w and h/w
122 development around the MPC885 processor family.
123
124config TQM823L
125 bool "TQM823L"
126 help
127 Say Y here to support the TQM823L, one of an MPC8xx-based family of
128 mini SBCs (half credit-card size) from TQ Components first released
129 in late 1999. Technical references are at
130 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
131 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
132 <http://www.denx.de/embedded-ppc-en.html>.
133
134config TQM850L
135 bool "TQM850L"
136 help
137 Say Y here to support the TQM850L, one of an MPC8xx-based family of
138 mini SBCs (half credit-card size) from TQ Components first released
139 in late 1999. Technical references are at
140 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
141 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
142 <http://www.denx.de/embedded-ppc-en.html>.
143
144config TQM855L
145 bool "TQM855L"
146 help
147 Say Y here to support the TQM855L, one of an MPC8xx-based family of
148 mini SBCs (half credit-card size) from TQ Components first released
149 in late 1999. Technical references are at
150 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
151 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
152 <http://www.denx.de/embedded-ppc-en.html>.
153
154config TQM860L
155 bool "TQM860L"
156 help
157 Say Y here to support the TQM860L, one of an MPC8xx-based family of
158 mini SBCs (half credit-card size) from TQ Components first released
159 in late 1999. Technical references are at
160 <http://www.denx.de/PDF/TQM8xxLHWM201.pdf>, and
161 <http://www.denx.de/PDF/STK8xxLHWM201.pdf>, and an image at
162 <http://www.denx.de/embedded-ppc-en.html>.
163
164config FPS850L
165 bool "FPS850L"
166
167config IVMS8
168 bool "IVMS8"
169 help
170 Say Y here to support the Integrated Voice-Mail Small 8-channel SBC
171 from Speech Design, released March 2001. The manufacturer's website
172 is at <http://www.speech-design.de/>.
173
174config IVML24
175 bool "IVML24"
176 help
177 Say Y here to support the Integrated Voice-Mail Large 24-channel SBC
178 from Speech Design, released March 2001. The manufacturer's website
179 is at <http://www.speech-design.de/>.
180
181config HERMES_PRO
182 bool "HERMES"
183
184config IP860
185 bool "IP860"
186
187config LWMON
188 bool "LWMON"
189
190config PCU_E
191 bool "PCU_E"
192
193config CCM
194 bool "CCM"
195
196config LANTEC
197 bool "LANTEC"
198
199config MBX
200 bool "MBX"
201 help
202	  MBX is a line of Motorola single-board computers based around the
203 MPC821 and MPC860 processors, and intended for embedded-controller
204 applications. Say Y here to support these boards directly.
205
206config WINCEPT
207 bool "WinCept"
208 help
209 The Wincept 100/110 is a Motorola single-board computer based on the
210 MPC821 PowerPC, introduced in 1998 and designed to be used in
211 thin-client machines. Say Y to support it directly.
212
213endchoice
214
215#
216# MPC8xx Communication options
217#
218
219menu "MPC8xx CPM Options"
220 depends on 8xx
221
222config SCC_ENET
223 bool "CPM SCC Ethernet"
224 depends on NET_ETHERNET
225 help
226 Enable Ethernet support via the Motorola MPC8xx serial
227 communications controller.
228
229choice
230 prompt "SCC used for Ethernet"
231 depends on SCC_ENET
232 default SCC1_ENET
233
234config SCC1_ENET
235 bool "SCC1"
236 help
237 Use MPC8xx serial communications controller 1 to drive Ethernet
238 (default).
239
240config SCC2_ENET
241 bool "SCC2"
242 help
243 Use MPC8xx serial communications controller 2 to drive Ethernet.
244
245config SCC3_ENET
246 bool "SCC3"
247 help
248 Use MPC8xx serial communications controller 3 to drive Ethernet.
249
250endchoice
251
252config FEC_ENET
253 bool "860T FEC Ethernet"
254 depends on NET_ETHERNET
255 help
256	  Enable Ethernet support via the Fast Ethernet Controller (FEC) on
257 the Motorola MPC8260.
258
259config USE_MDIO
260 bool "Use MDIO for PHY configuration"
261 depends on FEC_ENET
262 help
263 On some boards the hardware configuration of the ethernet PHY can be
264 used without any software interaction over the MDIO interface, so
265 all MII code can be omitted. Say N here if unsure or if you don't
266 need link status reports.
267
268config FEC_AM79C874
269 bool "Support AMD79C874 PHY"
270 depends on USE_MDIO
271
272config FEC_LXT970
273 bool "Support LXT970 PHY"
274 depends on USE_MDIO
275
276config FEC_LXT971
277 bool "Support LXT971 PHY"
278 depends on USE_MDIO
279
280config FEC_QS6612
281 bool "Support QS6612 PHY"
282 depends on USE_MDIO
283
284config ENET_BIG_BUFFERS
285 bool "Use Big CPM Ethernet Buffers"
286 depends on SCC_ENET || FEC_ENET
287 help
288 Allocate large buffers for MPC8xx Ethernet. Increases throughput
289 and decreases the likelihood of dropped packets, but costs memory.
290
291config HTDMSOUND
292 bool "Embedded Planet HIOX Audio"
293 depends on SOUND=y
294
295# This doesn't really belong here, but it is convenient to ask
296# 8xx specific questions.
297comment "Generic MPC8xx Options"
298
299config 8xx_COPYBACK
300 bool "Copy-Back Data Cache (else Writethrough)"
301 help
302 Saying Y here will cause the cache on an MPC8xx processor to be used
303 in Copy-Back mode. If you say N here, it is used in Writethrough
304 mode.
305
306 If in doubt, say Y here.
307
308config 8xx_CPU6
309 bool "CPU6 Silicon Errata (860 Pre Rev. C)"
310 help
311 MPC860 CPUs, prior to Rev C have some bugs in the silicon, which
312 require workarounds for Linux (and most other OSes to work). If you
313 get a BUG() very early in boot, this might fix the problem. For
314 more details read the document entitled "MPC860 Family Device Errata
315 Reference" on Motorola's website. This option also incurs a
316 performance hit.
317
318 If in doubt, say N here.
319
320choice
321 prompt "Microcode patch selection"
322 default NO_UCODE_PATCH
323 help
324 Help not implemented yet, coming soon.
325
326config NO_UCODE_PATCH
327 bool "None"
328
329config USB_SOF_UCODE_PATCH
330 bool "USB SOF patch"
331 help
332 Help not implemented yet, coming soon.
333
334config I2C_SPI_UCODE_PATCH
335 bool "I2C/SPI relocation patch"
336 help
337 Help not implemented yet, coming soon.
338
339config I2C_SPI_SMC1_UCODE_PATCH
340 bool "I2C/SPI/SMC1 relocation patch"
341 help
342 Help not implemented yet, coming soon.
343
344endchoice
345
346config UCODE_PATCH
347 bool
348 default y
349 depends on !NO_UCODE_PATCH
350
351endmenu
352
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
new file mode 100644
index 000000000000..172c0db63504
--- /dev/null
+++ b/arch/powerpc/platforms/Makefile
@@ -0,0 +1,13 @@
# Platform subdirectories for arch/powerpc.
# powermac/ is built when the merged tree is enabled, or (in the
# non-merged case) only on ppc64; the remaining platforms key off
# their individual CONFIG_* symbols.
1ifeq ($(CONFIG_PPC_MERGE),y)
2obj-$(CONFIG_PPC_PMAC)		+= powermac/
3else
4ifeq ($(CONFIG_PPC64),y)
5obj-$(CONFIG_PPC_PMAC)		+= powermac/
6endif
7endif
8obj-$(CONFIG_PPC_CHRP)		+= chrp/
9obj-$(CONFIG_4xx)		+= 4xx/
10obj-$(CONFIG_85xx)		+= 85xx/
11obj-$(CONFIG_PPC_PSERIES)	+= pseries/
12obj-$(CONFIG_PPC_ISERIES)	+= iseries/
13obj-$(CONFIG_PPC_MAPLE)		+= maple/
diff --git a/arch/powerpc/platforms/apus/Kconfig b/arch/powerpc/platforms/apus/Kconfig
new file mode 100644
index 000000000000..6bde3bffed86
--- /dev/null
+++ b/arch/powerpc/platforms/apus/Kconfig
@@ -0,0 +1,130 @@
1
2config AMIGA
3 bool
4 depends on APUS
5 default y
6 help
7 This option enables support for the Amiga series of computers.
8
9config ZORRO
10 bool
11 depends on APUS
12 default y
13 help
14 This enables support for the Zorro bus in the Amiga. If you have
15 expansion cards in your Amiga that conform to the Amiga
16 AutoConfig(tm) specification, say Y, otherwise N. Note that even
17 expansion cards that do not fit in the Zorro slots but fit in e.g.
18 the CPU slot may fall in this category, so you have to say Y to let
19 Linux use these.
20
21config ABSTRACT_CONSOLE
22 bool
23 depends on APUS
24 default y
25
26config APUS_FAST_EXCEPT
27 bool
28 depends on APUS
29 default y
30
31config AMIGA_PCMCIA
32 bool "Amiga 1200/600 PCMCIA support"
33 depends on APUS && EXPERIMENTAL
34 help
35 Include support in the kernel for pcmcia on Amiga 1200 and Amiga
36 600. If you intend to use pcmcia cards say Y; otherwise say N.
37
38config AMIGA_BUILTIN_SERIAL
39 tristate "Amiga builtin serial support"
40 depends on APUS
41 help
42 If you want to use your Amiga's built-in serial port in Linux,
43 answer Y.
44
45 To compile this driver as a module, choose M here.
46
47config GVPIOEXT
48 tristate "GVP IO-Extender support"
49 depends on APUS
50 help
51 If you want to use a GVP IO-Extender serial card in Linux, say Y.
52 Otherwise, say N.
53
54config GVPIOEXT_LP
55 tristate "GVP IO-Extender parallel printer support"
56 depends on GVPIOEXT
57 help
58 Say Y to enable driving a printer from the parallel port on your
59 GVP IO-Extender card, N otherwise.
60
61config GVPIOEXT_PLIP
62 tristate "GVP IO-Extender PLIP support"
63 depends on GVPIOEXT
64 help
65 Say Y to enable doing IP over the parallel port on your GVP
66 IO-Extender card, N otherwise.
67
68config MULTIFACE_III_TTY
69 tristate "Multiface Card III serial support"
70 depends on APUS
71 help
72 If you want to use a Multiface III card's serial port in Linux,
73 answer Y.
74
75 To compile this driver as a module, choose M here.
76
77config A2232
78 tristate "Commodore A2232 serial support (EXPERIMENTAL)"
79 depends on EXPERIMENTAL && APUS
80 ---help---
81 This option supports the 2232 7-port serial card shipped with the
82 Amiga 2000 and other Zorro-bus machines, dating from 1989. At
83 a max of 19,200 bps, the ports are served by a 6551 ACIA UART chip
84 each, plus a 8520 CIA, and a master 6502 CPU and buffer as well. The
85 ports were connected with 8 pin DIN connectors on the card bracket,
86 for which 8 pin to DB25 adapters were supplied. The card also had
87 jumpers internally to toggle various pinning configurations.
88
89 This driver can be built as a module; but then "generic_serial"
90 will also be built as a module. This has to be loaded before
91 "ser_a2232". If you want to do this, answer M here.
92
93config WHIPPET_SERIAL
94 tristate "Hisoft Whippet PCMCIA serial support"
95 depends on AMIGA_PCMCIA
96 help
97 HiSoft has a web page at <http://www.hisoft.co.uk/>, but there
98 is no listing for the Whippet in their Amiga section.
99
100config APNE
101 tristate "PCMCIA NE2000 support"
102 depends on AMIGA_PCMCIA
103 help
104 If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
105 say N.
106
107 To compile this driver as a module, choose M here: the
108 module will be called apne.
109
110config SERIAL_CONSOLE
111 bool "Support for serial port console"
112 depends on APUS && (AMIGA_BUILTIN_SERIAL=y || GVPIOEXT=y || MULTIFACE_III_TTY=y)
113
114config HEARTBEAT
115 bool "Use power LED as a heartbeat"
116 depends on APUS
117 help
118 Use the power-on LED on your machine as a load meter. The exact
119 behavior is platform-dependent, but normally the flash frequency is
120 a hyperbolic function of the 5-minute load average.
121
122config PROC_HARDWARE
123 bool "/proc/hardware support"
124 depends on APUS
125
126source "drivers/zorro/Kconfig"
127
128config PCI_PERMEDIA
129 bool "PCI for Permedia2"
130 depends on !4xx && !8xx && APUS
diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile
new file mode 100644
index 000000000000..902feb1ac431
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/Makefile
@@ -0,0 +1,4 @@
1obj-y += setup.o time.o pegasos_eth.o
2obj-$(CONFIG_PCI) += pci.o
3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_NVRAM) += nvram.o
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
new file mode 100644
index 000000000000..3a2057fa314a
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -0,0 +1,12 @@
1/*
2 * Declarations of CHRP platform-specific things.
3 */
4
5extern void chrp_nvram_init(void);
6extern void chrp_get_rtc_time(struct rtc_time *);
7extern int chrp_set_rtc_time(struct rtc_time *);
8extern void chrp_calibrate_decr(void);
9extern long chrp_time_init(void);
10
11extern void chrp_find_bridges(void);
12extern void chrp_event_scan(void);
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
new file mode 100644
index 000000000000..4ac7125aa09c
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -0,0 +1,84 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /dev/nvram driver for PPC
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <asm/uaccess.h>
18#include <asm/prom.h>
19#include <asm/machdep.h>
20#include "chrp.h"
21
22static unsigned int nvram_size;
23static unsigned char nvram_buf[4];
24static DEFINE_SPINLOCK(nvram_lock);
25
/*
 * Read one byte from CHRP NVRAM at offset @addr via the RTAS
 * "nvram-fetch" call.  Returns 0xff for an out-of-range address or an
 * RTAS failure (note 0xff is also a legal data value, so callers
 * cannot distinguish an error from data).
 */
26static unsigned char chrp_nvram_read(int addr)
27{
28	unsigned long done, flags;
29	unsigned char ret;
30
31	if (addr >= nvram_size) {
32		printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
33		       current->comm, addr, nvram_size);
34		return 0xff;
35	}
	/* nvram_buf is a shared bounce buffer; serialize the RTAS call */
36	spin_lock_irqsave(&nvram_lock, flags);
37	if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
38		ret = 0xff;
39	else
40		ret = nvram_buf[0];
41	spin_unlock_irqrestore(&nvram_lock, flags);
42
43	return ret;
44}
45
/*
 * Write one byte @val to CHRP NVRAM at offset @addr via the RTAS
 * "nvram-store" call.  Out-of-range addresses and RTAS failures are
 * logged at KERN_DEBUG and otherwise silently dropped (void return).
 */
46static void chrp_nvram_write(int addr, unsigned char val)
47{
48	unsigned long done, flags;
49
50	if (addr >= nvram_size) {
51		printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
52		       current->comm, addr, nvram_size);
53		return;
54	}
	/* nvram_buf is a shared bounce buffer; serialize the RTAS call */
55	spin_lock_irqsave(&nvram_lock, flags);
56	nvram_buf[0] = val;
57	if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
58		printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
59	spin_unlock_irqrestore(&nvram_lock, flags);
60}
61
/*
 * Locate the OF "nvram" node, cache its size from the "#bytes"
 * property, and install the RTAS-backed read/write hooks in ppc_md.
 * Silently does nothing if the node or property is absent/malformed.
 */
62void __init chrp_nvram_init(void)
63{
64	struct device_node *nvram;
65	unsigned int *nbytes_p, proplen;
66
67	nvram = of_find_node_by_type(NULL, "nvram");
68	if (nvram == NULL)
69		return;
70
71	nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen);
	/* NOTE(review): this early return skips of_node_put(nvram) —
	 * looks like a device-node refcount leak; confirm. */
72	if (nbytes_p == NULL || proplen != sizeof(unsigned int))
73		return;
74
75	nvram_size = *nbytes_p;
76
77	printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
78	of_node_put(nvram);
79
80	ppc_md.nvram_read_val = chrp_nvram_read;
81	ppc_md.nvram_write_val = chrp_nvram_write;
82
83	return;
84}
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
new file mode 100644
index 000000000000..82c429d487f3
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -0,0 +1,310 @@
1/*
2 * CHRP pci routines.
3 */
4
5#include <linux/config.h>
6#include <linux/kernel.h>
7#include <linux/pci.h>
8#include <linux/delay.h>
9#include <linux/string.h>
10#include <linux/init.h>
11#include <linux/ide.h>
12
13#include <asm/io.h>
14#include <asm/pgtable.h>
15#include <asm/irq.h>
16#include <asm/hydra.h>
17#include <asm/prom.h>
18#include <asm/gg2.h>
19#include <asm/machdep.h>
20#include <asm/sections.h>
21#include <asm/pci-bridge.h>
22#include <asm/open_pic.h>
23#include <asm/grackle.h>
24#include <asm/rtas.h>
25
26/* LongTrail */
27void __iomem *gg2_pci_config_base;
28
29/*
30 * The VLSI Golden Gate II has only 512K of PCI configuration space, so we
31 * limit the bus number to 3 bits
32 */
33
/*
 * Read @len bytes of PCI config space through the GG2's memory-mapped
 * config window (hose->cfg_data).  The caller has already validated
 * alignment of @off and that @len is 1, 2 or 4; little-endian
 * registers are swapped by in_le16/in_le32.
 */
34int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
35		    int len, u32 *val)
36{
37	volatile void __iomem *cfg_data;
38	struct pci_controller *hose = bus->sysdata;
39
	/* only 512K of config space: bus number limited to 3 bits */
40	if (bus->number > 7)
41		return PCIBIOS_DEVICE_NOT_FOUND;
42	/*
43	 * Note: the caller has already checked that off is
44	 * suitably aligned and that len is 1, 2 or 4.
45	 */
46	cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
47	switch (len) {
48	case 1:
49		*val = in_8(cfg_data);
50		break;
51	case 2:
52		*val = in_le16(cfg_data);
53		break;
54	default:
55		*val = in_le32(cfg_data);
56		break;
57	}
58	return PCIBIOS_SUCCESSFUL;
59}
60
/*
 * Write @len bytes of PCI config space through the GG2's memory-mapped
 * config window.  Mirror image of gg2_read_config(); same alignment
 * and bus-number (<= 7) constraints apply.
 */
61int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
62		     int len, u32 val)
63{
64	volatile void __iomem *cfg_data;
65	struct pci_controller *hose = bus->sysdata;
66
67	if (bus->number > 7)
68		return PCIBIOS_DEVICE_NOT_FOUND;
69	/*
70	 * Note: the caller has already checked that off is
71	 * suitably aligned and that len is 1, 2 or 4.
72	 */
73	cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
74	switch (len) {
75	case 1:
76		out_8(cfg_data, val);
77		break;
78	case 2:
79		out_le16(cfg_data, val);
80		break;
81	default:
82		out_le32(cfg_data, val);
83		break;
84	}
85	return PCIBIOS_SUCCESSFUL;
86}
87
/* pci_ops for the VLSI Golden Gate II (LongTrail) host bridge */
88static struct pci_ops gg2_pci_ops =
89{
90	gg2_read_config,
91	gg2_write_config
92};
93
94/*
95 * Access functions for PCI config space using RTAS calls.
96 */
/*
 * Read PCI config space via the RTAS "read-pci-config" call.  The
 * config address is packed as offset | devfn<<8 | relative-bus<<16 |
 * hose-index<<24.  On RTAS failure *val still receives the (stale -1)
 * result and PCIBIOS_DEVICE_NOT_FOUND is returned.
 */
97int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
98		     int len, u32 *val)
99{
100	struct pci_controller *hose = bus->sysdata;
101	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
102		| (((bus->number - hose->first_busno) & 0xff) << 16)
103		| (hose->index << 24);
104	int ret = -1;
105	int rval;
106
107	rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
108	*val = ret;
109	return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
110}
111
/*
 * Write PCI config space via the RTAS "write-pci-config" call.
 * Address packing is identical to rtas_read_config().
 */
112int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
113		      int len, u32 val)
114{
115	struct pci_controller *hose = bus->sysdata;
116	unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
117		| (((bus->number - hose->first_busno) & 0xff) << 16)
118		| (hose->index << 24);
119	int rval;
120
121	rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
122			 addr, len, val);
123	return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
124}
125
/* pci_ops using RTAS firmware calls for config-space access */
126static struct pci_ops rtas_pci_ops =
127{
128	rtas_read_config,
129	rtas_write_config
130};
131
/* Mapped Hydra Mac-I/O register block, or NULL if no "mac-io" node */
132volatile struct Hydra __iomem *Hydra = NULL;
133
/*
 * Map the Hydra Mac-I/O controller (found via the OF "mac-io" node)
 * and enable its SCC/SCSI/MPIC feature bits.  Returns 1 on success,
 * 0 if the node is absent or has no address.
 */
134int __init
135hydra_init(void)
136{
137	struct device_node *np;
138
139	np = find_devices("mac-io");
140	if (np == NULL || np->n_addrs == 0)
141		return 0;
	/* NOTE(review): ioremap() result is not checked before use */
142	Hydra = ioremap(np->addrs[0].address, np->addrs[0].size);
143	printk("Hydra Mac I/O at %lx\n", np->addrs[0].address);
144	printk("Hydra Feature_Control was %x",
145	       in_le32(&Hydra->Feature_Control));
146	out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
147					   HYDRA_FC_SCSI_CELL_EN |
148					   HYDRA_FC_SCCA_ENABLE |
149					   HYDRA_FC_SCCB_ENABLE |
150					   HYDRA_FC_ARB_BYPASS |
151					   HYDRA_FC_MPIC_ENABLE |
152					   HYDRA_FC_SLOW_SCC_PCLK |
153					   HYDRA_FC_MPIC_IS_MASTER));
154	printk(", now %x\n", in_le32(&Hydra->Feature_Control));
155	return 1;
156}
157
/*
 * Post-scan fixup: copy each PCI device's interrupt line from its
 * Open Firmware node (OpenPIC routing) into dev->irq and write it
 * back to the device's PCI_INTERRUPT_LINE register.
 */
158void __init
159chrp_pcibios_fixup(void)
160{
161	struct pci_dev *dev = NULL;
162	struct device_node *np;
163
164	/* PCI interrupts are controlled by the OpenPIC */
165	for_each_pci_dev(dev) {
166		np = pci_device_to_OF_node(dev);
167		if ((np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
168			dev->irq = np->intrs[0].line;
169		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
170	}
171}
172
/* "go-slow" flag bit in the IBM Python bridge's control register 12 */
173#define PRG_CL_RESET_VALID 0x00010000
174
/*
 * Configure an IBM Python host bridge: set up indirect config-space
 * access at fixed offsets from the bridge base, then clear the
 * PRG_CL_RESET_VALID ("go-slow") bit in control register 12.
 */
175static void __init
176setup_python(struct pci_controller *hose, struct device_node *dev)
177{
178	u32 __iomem *reg;
179	u32 val;
180	unsigned long addr = dev->addrs[0].address;
181
182	setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010);
183
184	/* Clear the magic go-slow bit */
185	reg = ioremap(dev->addrs[0].address + 0xf6000, 0x40);
186	val = in_be32(&reg[12]);
187	if (val & PRG_CL_RESET_VALID) {
188		out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
		/* read back to push the write out to the device */
189		in_be32(&reg[12]);
190	}
191	iounmap(reg);
192}
193
194/* Marvell Discovery II based Pegasos 2 */
/*
 * On Pegasos 2, use RTAS-based config access if the firmware provides
 * an "rtas" node (otherwise warn and leave hose->ops unset), and force
 * full bus renumbering.
 */
195static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev)
196{
197	struct device_node *root = find_path_device("/");
198	struct device_node *rtas;
199
200	rtas = of_find_node_by_name (root, "rtas");
201	if (rtas) {
202		hose->ops = &rtas_pci_ops;
203	} else {
204		printk ("RTAS supporting Pegasos OF not found, please upgrade"
205			" your firmware\n");
206	}
	/* firmware bus numbering is not trusted; reassign all buses */
207	pci_assign_all_buses = 1;
208}
209
/*
 * Scan the children of the OF root for nodes of type "pci" and set up
 * a pci_controller for each, selecting the config-space access method
 * from the bridge model / machine identity (Python, Grackle, GG2 on
 * LongTrail, Pegasos 1/2, or generic RTAS as the fallback).
 */
210void __init
211chrp_find_bridges(void)
212{
213	struct device_node *dev;
214	int *bus_range;
215	int len, index = -1;
216	struct pci_controller *hose;
217	unsigned int *dma;
218	char *model, *machine;
219	int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
220	struct device_node *root = find_path_device("/");
221
222	/*
223	 * The PCI host bridge nodes on some machines don't have
224	 * properties to adequately identify them, so we have to
225	 * look at what sort of machine this is as well.
226	 */
227	machine = get_property(root, "model", NULL);
228	if (machine != NULL) {
229		is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0;
230		is_mot = strncmp(machine, "MOT", 3) == 0;
231		if (strncmp(machine, "Pegasos2", 8) == 0)
232			is_pegasos = 2;
233		else if (strncmp(machine, "Pegasos", 7) == 0)
234			is_pegasos = 1;
235	}
236	for (dev = root->child; dev != NULL; dev = dev->sibling) {
237		if (dev->type == NULL || strcmp(dev->type, "pci") != 0)
238			continue;
239		++index;
240		/* The GG2 bridge on the LongTrail doesn't have an address */
241		if (dev->n_addrs < 1 && !is_longtrail) {
242			printk(KERN_WARNING "Can't use %s: no address\n",
243			       dev->full_name);
244			continue;
245		}
246		bus_range = (int *) get_property(dev, "bus-range", &len);
247		if (bus_range == NULL || len < 2 * sizeof(int)) {
248			printk(KERN_WARNING "Can't get bus-range for %s\n",
249			       dev->full_name);
250			continue;
251		}
252		if (bus_range[1] == bus_range[0])
253			printk(KERN_INFO "PCI bus %d", bus_range[0]);
254		else
255			printk(KERN_INFO "PCI buses %d..%d",
256			       bus_range[0], bus_range[1]);
257		printk(" controlled by %s", dev->type);
258		if (dev->n_addrs > 0)
259			printk(" at %lx", dev->addrs[0].address);
260		printk("\n");
261
262		hose = pcibios_alloc_controller();
263		if (!hose) {
264			printk("Can't allocate PCI controller structure for %s\n",
265				dev->full_name);
266			continue;
267		}
268		hose->arch_data = dev;
269		hose->first_busno = bus_range[0];
270		hose->last_busno = bus_range[1];
271
		/* choose a config-space access method per bridge/machine */
272		model = get_property(dev, "model", NULL);
273		if (model == NULL)
274			model = "<none>";
275		if (device_is_compatible(dev, "IBM,python")) {
276			setup_python(hose, dev);
277		} else if (is_mot
278			   || strncmp(model, "Motorola, Grackle", 17) == 0) {
279			setup_grackle(hose);
280		} else if (is_longtrail) {
281			void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
282			hose->ops = &gg2_pci_ops;
283			hose->cfg_data = p;
284			gg2_pci_config_base = p;
285		} else if (is_pegasos == 1) {
286			setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc);
287		} else if (is_pegasos == 2) {
288			setup_peg2(hose, dev);
289		} else {
290			printk("No methods for %s (model %s), using RTAS\n",
291			       dev->full_name, model);
292			hose->ops = &rtas_pci_ops;
293		}
294
295		pci_process_bridge_OF_ranges(hose, dev, index == 0);
296
297		/* check the first bridge for a property that we can
298		   use to set pci_dram_offset */
299		dma = (unsigned int *)
300			get_property(dev, "ibm,dma-ranges", &len);
301		if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) {
302			pci_dram_offset = dma[2] - dma[3];
303			printk("pci_dram_offset = %lx\n", pci_dram_offset);
304		}
305	}
306
307	/* Do not fixup interrupts from OF tree on pegasos */
308	if (is_pegasos == 0)
309		ppc_md.pcibios_fixup = chrp_pcibios_fixup;
310}
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
new file mode 100644
index 000000000000..a9052305c35d
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -0,0 +1,213 @@
1/*
2 * arch/ppc/platforms/chrp_pegasos_eth.c
3 *
4 * Copyright (C) 2005 Sven Luther <sl@bplan-gmbh.de>
5 * Thanks to :
6 * Dale Farnsworth <dale@farnsworth.org>
7 * Mark A. Greer <mgreer@mvista.com>
8 * Nicolas DET <nd@bplan-gmbh.de>
9 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 * And anyone else who helped me on this.
11 */
12
13#include <linux/types.h>
14#include <linux/init.h>
15#include <linux/ioport.h>
16#include <linux/device.h>
17#include <linux/mv643xx.h>
18#include <linux/pci.h>
19
20#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
21#define PEGASOS2_MARVELL_REGSIZE (0x00004000)
22#define PEGASOS2_SRAM_BASE (0xf2000000)
23#define PEGASOS2_SRAM_SIZE (256*1024)
24
25#define PEGASOS2_SRAM_BASE_ETH0 (PEGASOS2_SRAM_BASE)
26#define PEGASOS2_SRAM_BASE_ETH1 (PEGASOS2_SRAM_BASE_ETH0 + (PEGASOS2_SRAM_SIZE / 2) )
27
28
29#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
30#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
31
32#undef BE_VERBOSE
33
34static struct resource mv643xx_eth_shared_resources[] = {
35 [0] = {
36 .name = "ethernet shared base",
37 .start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
38 .end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
39 MV643XX_ETH_SHARED_REGS_SIZE - 1,
40 .flags = IORESOURCE_MEM,
41 },
42};
43
44static struct platform_device mv643xx_eth_shared_device = {
45 .name = MV643XX_ETH_SHARED_NAME,
46 .id = 0,
47 .num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
48 .resource = mv643xx_eth_shared_resources,
49};
50
51static struct resource mv643xx_eth0_resources[] = {
52 [0] = {
53 .name = "eth0 irq",
54 .start = 9,
55 .end = 9,
56 .flags = IORESOURCE_IRQ,
57 },
58};
59
60
61static struct mv643xx_eth_platform_data eth0_pd = {
62 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
63 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
64 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
65
66 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH0 + PEGASOS2_SRAM_TXRING_SIZE,
67 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
68 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
69};
70
71static struct platform_device eth0_device = {
72 .name = MV643XX_ETH_NAME,
73 .id = 0,
74 .num_resources = ARRAY_SIZE(mv643xx_eth0_resources),
75 .resource = mv643xx_eth0_resources,
76 .dev = {
77 .platform_data = &eth0_pd,
78 },
79};
80
81static struct resource mv643xx_eth1_resources[] = {
82 [0] = {
83 .name = "eth1 irq",
84 .start = 9,
85 .end = 9,
86 .flags = IORESOURCE_IRQ,
87 },
88};
89
90static struct mv643xx_eth_platform_data eth1_pd = {
91 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
92 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
93 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
94
95 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH1 + PEGASOS2_SRAM_TXRING_SIZE,
96 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
97 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
98};
99
100static struct platform_device eth1_device = {
101 .name = MV643XX_ETH_NAME,
102 .id = 1,
103 .num_resources = ARRAY_SIZE(mv643xx_eth1_resources),
104 .resource = mv643xx_eth1_resources,
105 .dev = {
106 .platform_data = &eth1_pd,
107 },
108};
109
110static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
111 &mv643xx_eth_shared_device,
112 &eth0_device,
113 &eth1_device,
114};
115
116/***********/
117/***********/
118#define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); }
119#define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset)
120
121static void __iomem *mv643xx_reg_base;
122
123static int Enable_SRAM(void)
124{
125 u32 ALong;
126
127 if (mv643xx_reg_base == NULL)
128 mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE,
129 PEGASOS2_MARVELL_REGSIZE);
130
131 if (mv643xx_reg_base == NULL)
132 return -ENOMEM;
133
134#ifdef BE_VERBOSE
135 printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n",
136 (void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base);
137#endif
138
139 MV_WRITE(MV64340_SRAM_CONFIG, 0);
140
141 MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16);
142
143 MV_READ(MV64340_BASE_ADDR_ENABLE, ALong);
144 ALong &= ~(1 << 19);
145 MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong);
146
147 ALong = 0x02;
148 ALong |= PEGASOS2_SRAM_BASE & 0xffff0000;
149 MV_WRITE(MV643XX_ETH_BAR_4, ALong);
150
151 MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000);
152
153 MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
154 ALong &= ~(1 << 4);
155 MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
156
157#ifdef BE_VERBOSE
158 printk("Pegasos II/Marvell MV64361: register unmapped\n");
159 printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE);
160#endif
161
162 iounmap(mv643xx_reg_base);
163 mv643xx_reg_base = NULL;
164
165 return 1;
166}
167
168
169/***********/
170/***********/
171int mv643xx_eth_add_pds(void)
172{
173 int ret = 0;
174 static struct pci_device_id pci_marvell_mv64360[] = {
175 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360) },
176 { }
177 };
178
179#ifdef BE_VERBOSE
180 printk("Pegasos II/Marvell MV64361: init\n");
181#endif
182
183 if (pci_dev_present(pci_marvell_mv64360)) {
184 ret = platform_add_devices(mv643xx_eth_pd_devs,
185 ARRAY_SIZE(mv643xx_eth_pd_devs));
186
187 if ( Enable_SRAM() < 0)
188 {
189 eth0_pd.tx_sram_addr = 0;
190 eth0_pd.tx_sram_size = 0;
191 eth0_pd.rx_sram_addr = 0;
192 eth0_pd.rx_sram_size = 0;
193
194 eth1_pd.tx_sram_addr = 0;
195 eth1_pd.tx_sram_size = 0;
196 eth1_pd.rx_sram_addr = 0;
197 eth1_pd.rx_sram_size = 0;
198
199#ifdef BE_VERBOSE
200 printk("Pegasos II/Marvell MV64361: Can't enable the "
201 "SRAM\n");
202#endif
203 }
204 }
205
206#ifdef BE_VERBOSE
207 printk("Pegasos II/Marvell MV64361: init is over\n");
208#endif
209
210 return ret;
211}
212
213device_initcall(mv643xx_eth_add_pds);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
new file mode 100644
index 000000000000..ecd32d5d85f4
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -0,0 +1,522 @@
1/*
2 * arch/ppc/platforms/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas
6 * Modified by Cort Dougan (cort@cs.nmt.edu)
7 */
8
9/*
10 * bootup setup stuff..
11 */
12
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/stddef.h>
19#include <linux/unistd.h>
20#include <linux/ptrace.h>
21#include <linux/slab.h>
22#include <linux/user.h>
23#include <linux/a.out.h>
24#include <linux/tty.h>
25#include <linux/major.h>
26#include <linux/interrupt.h>
27#include <linux/reboot.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/version.h>
31#include <linux/adb.h>
32#include <linux/module.h>
33#include <linux/delay.h>
34#include <linux/ide.h>
35#include <linux/console.h>
36#include <linux/seq_file.h>
37#include <linux/root_dev.h>
38#include <linux/initrd.h>
39#include <linux/module.h>
40
41#include <asm/io.h>
42#include <asm/pgtable.h>
43#include <asm/prom.h>
44#include <asm/gg2.h>
45#include <asm/pci-bridge.h>
46#include <asm/dma.h>
47#include <asm/machdep.h>
48#include <asm/irq.h>
49#include <asm/hydra.h>
50#include <asm/sections.h>
51#include <asm/time.h>
52#include <asm/btext.h>
53#include <asm/i8259.h>
54#include <asm/mpic.h>
55#include <asm/rtas.h>
56#include <asm/xmon.h>
57
58#include "chrp.h"
59
60void rtas_indicator_progress(char *, unsigned short);
61void btext_progress(char *, unsigned short);
62
63int _chrp_type;
64EXPORT_SYMBOL(_chrp_type);
65
66struct mpic *chrp_mpic;
67
68/*
69 * XXX this should be in xmon.h, but putting it there means xmon.h
70 * has to include <linux/interrupt.h> (to get irqreturn_t), which
71 * causes all sorts of problems. -- paulus
72 */
73extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
74
75extern unsigned long loops_per_jiffy;
76
77#ifdef CONFIG_SMP
78extern struct smp_ops_t chrp_smp_ops;
79#endif
80
81static const char *gg2_memtypes[4] = {
82 "FPM", "SDRAM", "EDO", "BEDO"
83};
84static const char *gg2_cachesizes[4] = {
85 "256 KB", "512 KB", "1 MB", "Reserved"
86};
87static const char *gg2_cachetypes[4] = {
88 "Asynchronous", "Reserved", "Flow-Through Synchronous",
89 "Pipelined Synchronous"
90};
91static const char *gg2_cachemodes[4] = {
92 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
93};
94
95void chrp_show_cpuinfo(struct seq_file *m)
96{
97 int i, sdramen;
98 unsigned int t;
99 struct device_node *root;
100 const char *model = "";
101
102 root = find_path_device("/");
103 if (root)
104 model = get_property(root, "model", NULL);
105 seq_printf(m, "machine\t\t: CHRP %s\n", model);
106
107 /* longtrail (goldengate) stuff */
108 if (!strncmp(model, "IBM,LongTrail", 13)) {
109 /* VLSI VAS96011/12 `Golden Gate 2' */
110 /* Memory banks */
111 sdramen = (in_le32(gg2_pci_config_base + GG2_PCI_DRAM_CTRL)
112 >>31) & 1;
113 for (i = 0; i < (sdramen ? 4 : 6); i++) {
114 t = in_le32(gg2_pci_config_base+
115 GG2_PCI_DRAM_BANK0+
116 i*4);
117 if (!(t & 1))
118 continue;
119 switch ((t>>8) & 0x1f) {
120 case 0x1f:
121 model = "4 MB";
122 break;
123 case 0x1e:
124 model = "8 MB";
125 break;
126 case 0x1c:
127 model = "16 MB";
128 break;
129 case 0x18:
130 model = "32 MB";
131 break;
132 case 0x10:
133 model = "64 MB";
134 break;
135 case 0x00:
136 model = "128 MB";
137 break;
138 default:
139 model = "Reserved";
140 break;
141 }
142 seq_printf(m, "memory bank %d\t: %s %s\n", i, model,
143 gg2_memtypes[sdramen ? 1 : ((t>>1) & 3)]);
144 }
145 /* L2 cache */
146 t = in_le32(gg2_pci_config_base+GG2_PCI_CC_CTRL);
147 seq_printf(m, "board l2\t: %s %s (%s)\n",
148 gg2_cachesizes[(t>>7) & 3],
149 gg2_cachetypes[(t>>2) & 3],
150 gg2_cachemodes[t & 3]);
151 }
152}
153
154/*
155 * Fixes for the National Semiconductor PC78308VUL SuperI/O
156 *
157 * Some versions of Open Firmware incorrectly initialize the IRQ settings
158 * for keyboard and mouse
159 */
160static inline void __init sio_write(u8 val, u8 index)
161{
162 outb(index, 0x15c);
163 outb(val, 0x15d);
164}
165
166static inline u8 __init sio_read(u8 index)
167{
168 outb(index, 0x15c);
169 return inb(0x15d);
170}
171
172static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
173 u8 type)
174{
175 u8 level0, type0, active;
176
177 /* select logical device */
178 sio_write(device, 0x07);
179 active = sio_read(0x30);
180 level0 = sio_read(0x70);
181 type0 = sio_read(0x71);
182 if (level0 != level || type0 != type || !active) {
183 printk(KERN_WARNING "sio: %s irq level %d, type %d, %sactive: "
184 "remapping to level %d, type %d, active\n",
185 name, level0, type0, !active ? "in" : "", level, type);
186 sio_write(0x01, 0x30);
187 sio_write(level, 0x70);
188 sio_write(type, 0x71);
189 }
190}
191
192static void __init sio_init(void)
193{
194 struct device_node *root;
195
196 if ((root = find_path_device("/")) &&
197 !strncmp(get_property(root, "model", NULL), "IBM,LongTrail", 13)) {
198 /* logical device 0 (KBC/Keyboard) */
199 sio_fixup_irq("keyboard", 0, 1, 2);
200 /* select logical device 1 (KBC/Mouse) */
201 sio_fixup_irq("mouse", 1, 12, 2);
202 }
203}
204
205
206static void __init pegasos_set_l2cr(void)
207{
208 struct device_node *np;
209
210 /* On Pegasos, enable the l2 cache if needed, as the OF forgets it */
211 if (_chrp_type != _CHRP_Pegasos)
212 return;
213
214 /* Enable L2 cache if needed */
215 np = find_type_devices("cpu");
216 if (np != NULL) {
217 unsigned int *l2cr = (unsigned int *)
218 get_property (np, "l2cr", NULL);
219 if (l2cr == NULL) {
220 printk ("Pegasos l2cr : no cpu l2cr property found\n");
221 return;
222 }
223 if (!((*l2cr) & 0x80000000)) {
224 printk ("Pegasos l2cr : L2 cache was not active, "
225 "activating\n");
226 _set_L2CR(0);
227 _set_L2CR((*l2cr) | 0x80000000);
228 }
229 }
230}
231
232void __init chrp_setup_arch(void)
233{
234 struct device_node *root = find_path_device ("/");
235 char *machine = NULL;
236 struct device_node *device;
237 unsigned int *p = NULL;
238
239 /* init to some ~sane value until calibrate_delay() runs */
240 loops_per_jiffy = 50000000/HZ;
241
242 if (root)
243 machine = get_property(root, "model", NULL);
244 if (machine && strncmp(machine, "Pegasos", 7) == 0) {
245 _chrp_type = _CHRP_Pegasos;
246 } else if (machine && strncmp(machine, "IBM", 3) == 0) {
247 _chrp_type = _CHRP_IBM;
248 } else if (machine && strncmp(machine, "MOT", 3) == 0) {
249 _chrp_type = _CHRP_Motorola;
250 } else {
251 /* Let's assume it is an IBM chrp if all else fails */
252 _chrp_type = _CHRP_IBM;
253 }
254 printk("chrp type = %x\n", _chrp_type);
255
256 rtas_initialize();
257 if (rtas_token("display-character") >= 0)
258 ppc_md.progress = rtas_progress;
259
260#ifdef CONFIG_BOOTX_TEXT
261 if (ppc_md.progress == NULL && boot_text_mapped)
262 ppc_md.progress = btext_progress;
263#endif
264
265#ifdef CONFIG_BLK_DEV_INITRD
266 /* this is fine for chrp */
267 initrd_below_start_ok = 1;
268
269 if (initrd_start)
270 ROOT_DEV = Root_RAM0;
271 else
272#endif
273 ROOT_DEV = Root_SDA2; /* sda2 (sda1 is for the kernel) */
274
275 /* On pegasos, enable the L2 cache if not already done by OF */
276 pegasos_set_l2cr();
277
278 /* Lookup PCI host bridges */
279 chrp_find_bridges();
280
281 /*
282 * Temporary fixes for PCI devices.
283 * -- Geert
284 */
285 hydra_init(); /* Mac I/O */
286
287 /*
288 * Fix the Super I/O configuration
289 */
290 sio_init();
291
292 /* Get the event scan rate for the rtas so we know how
293 * often it expects a heartbeat. -- Cort
294 */
295 device = find_devices("rtas");
296 if (device)
297 p = (unsigned int *) get_property
298 (device, "rtas-event-scan-rate", NULL);
299 if (p && *p) {
300 ppc_md.heartbeat = chrp_event_scan;
301 ppc_md.heartbeat_reset = HZ / (*p * 30) - 1;
302 ppc_md.heartbeat_count = 1;
303 printk("RTAS Event Scan Rate: %u (%lu jiffies)\n",
304 *p, ppc_md.heartbeat_reset);
305 }
306
307 pci_create_OF_bus_map();
308
309 /*
310 * Print the banner, then scroll down so boot progress
311 * can be printed. -- Cort
312 */
313 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
314}
315
316void
317chrp_event_scan(void)
318{
319 unsigned char log[1024];
320 int ret = 0;
321
322 /* XXX: we should loop until the hardware says no more error logs -- Cort */
323 rtas_call(rtas_token("event-scan"), 4, 1, &ret, 0xffffffff, 0,
324 __pa(log), 1024);
325 ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
326}
327
328/*
329 * Finds the open-pic node and sets up the mpic driver.
330 */
331static void __init chrp_find_openpic(void)
332{
333 struct device_node *np, *root;
334 int len, i, j, irq_count;
335 int isu_size, idu_size;
336 unsigned int *iranges, *opprop = NULL;
337 int oplen = 0;
338 unsigned long opaddr;
339 int na = 1;
340 unsigned char init_senses[NR_IRQS - NUM_8259_INTERRUPTS];
341
342 np = find_type_devices("open-pic");
343 if (np == NULL)
344 return;
345 root = find_path_device("/");
346 if (root) {
347 opprop = (unsigned int *) get_property
348 (root, "platform-open-pic", &oplen);
349 na = prom_n_addr_cells(root);
350 }
351 if (opprop && oplen >= na * sizeof(unsigned int)) {
352 opaddr = opprop[na-1]; /* assume 32-bit */
353 oplen /= na * sizeof(unsigned int);
354 } else {
355 if (np->n_addrs == 0)
356 return;
357 opaddr = np->addrs[0].address;
358 oplen = 0;
359 }
360
361 printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
362
363 irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
364 prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4);
365
366 iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
367 if (iranges == NULL)
368 len = 0; /* non-distributed mpic */
369 else
370 len /= 2 * sizeof(unsigned int);
371
372 /*
373 * The first pair of cells in interrupt-ranges refers to the
374 * IDU; subsequent pairs refer to the ISUs.
375 */
376 if (oplen < len) {
377 printk(KERN_ERR "Insufficient addresses for distributed"
378 " OpenPIC (%d < %d)\n", np->n_addrs, len);
379 len = oplen;
380 }
381
382 isu_size = 0;
383 idu_size = 0;
384 if (len > 0 && iranges[1] != 0) {
385 printk(KERN_INFO "OpenPIC irqs %d..%d in IDU\n",
386 iranges[0], iranges[0] + iranges[1] - 1);
387 idu_size = iranges[1];
388 }
389 if (len > 1)
390 isu_size = iranges[3];
391
392 chrp_mpic = mpic_alloc(opaddr, MPIC_PRIMARY,
393 isu_size, NUM_ISA_INTERRUPTS, irq_count,
394 NR_IRQS - 4, init_senses, irq_count,
395 " MPIC ");
396 if (chrp_mpic == NULL) {
397 printk(KERN_ERR "Failed to allocate MPIC structure\n");
398 return;
399 }
400
401 j = na - 1;
402 for (i = 1; i < len; ++i) {
403 iranges += 2;
404 j += na;
405 printk(KERN_INFO "OpenPIC irqs %d..%d in ISU at %x\n",
406 iranges[0], iranges[0] + iranges[1] - 1,
407 opprop[j]);
408 mpic_assign_isu(chrp_mpic, i - 1, opprop[j]);
409 }
410
411 mpic_init(chrp_mpic);
412 mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
413}
414
415#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
416static struct irqaction xmon_irqaction = {
417 .handler = xmon_irq,
418 .mask = CPU_MASK_NONE,
419 .name = "XMON break",
420};
421#endif
422
423void __init chrp_init_IRQ(void)
424{
425 struct device_node *np;
426 unsigned long chrp_int_ack = 0;
427#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
428 struct device_node *kbd;
429#endif
430
431 for (np = find_devices("pci"); np != NULL; np = np->next) {
432 unsigned int *addrp = (unsigned int *)
433 get_property(np, "8259-interrupt-acknowledge", NULL);
434
435 if (addrp == NULL)
436 continue;
437 chrp_int_ack = addrp[prom_n_addr_cells(np)-1];
438 break;
439 }
440 if (np == NULL)
441 printk(KERN_ERR "Cannot find PCI interrupt acknowledge address\n");
442
443 chrp_find_openpic();
444
445 i8259_init(chrp_int_ack, 0);
446
447 if (_chrp_type == _CHRP_Pegasos)
448 ppc_md.get_irq = i8259_irq;
449 else
450 ppc_md.get_irq = mpic_get_irq;
451
452#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
453 /* see if there is a keyboard in the device tree
454 with a parent of type "adb" */
455 for (kbd = find_devices("keyboard"); kbd; kbd = kbd->next)
456 if (kbd->parent && kbd->parent->type
457 && strcmp(kbd->parent->type, "adb") == 0)
458 break;
459 if (kbd)
460 setup_irq(HYDRA_INT_ADB_NMI, &xmon_irqaction);
461#endif
462}
463
464void __init
465chrp_init2(void)
466{
467#ifdef CONFIG_NVRAM
468 chrp_nvram_init();
469#endif
470
471 request_region(0x20,0x20,"pic1");
472 request_region(0xa0,0x20,"pic2");
473 request_region(0x00,0x20,"dma1");
474 request_region(0x40,0x20,"timer");
475 request_region(0x80,0x10,"dma page reg");
476 request_region(0xc0,0x20,"dma2");
477
478 if (ppc_md.progress)
479 ppc_md.progress(" Have fun! ", 0x7777);
480}
481
482void __init chrp_init(void)
483{
484 ISA_DMA_THRESHOLD = ~0L;
485 DMA_MODE_READ = 0x44;
486 DMA_MODE_WRITE = 0x48;
487 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
488 ppc_do_canonicalize_irqs = 1;
489
490 /* Assume we have an 8259... */
491 __irq_offset_value = NUM_ISA_INTERRUPTS;
492
493 ppc_md.setup_arch = chrp_setup_arch;
494 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
495
496 ppc_md.init_IRQ = chrp_init_IRQ;
497 ppc_md.init = chrp_init2;
498
499 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
500
501 ppc_md.restart = rtas_restart;
502 ppc_md.power_off = rtas_power_off;
503 ppc_md.halt = rtas_halt;
504
505 ppc_md.time_init = chrp_time_init;
506 ppc_md.set_rtc_time = chrp_set_rtc_time;
507 ppc_md.get_rtc_time = chrp_get_rtc_time;
508 ppc_md.calibrate_decr = chrp_calibrate_decr;
509
510#ifdef CONFIG_SMP
511 smp_ops = &chrp_smp_ops;
512#endif /* CONFIG_SMP */
513}
514
515#ifdef CONFIG_BOOTX_TEXT
516void
517btext_progress(char *s, unsigned short hex)
518{
519 btext_drawstring(s);
520 btext_drawstring("\n");
521}
522#endif /* CONFIG_BOOTX_TEXT */
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
new file mode 100644
index 000000000000..31ee49c25014
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -0,0 +1,122 @@
1/*
2 * Smp support for CHRP machines.
3 *
4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
6 *
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
8 *
9 */
10
11#include <linux/config.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/smp_lock.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21
22#include <asm/ptrace.h>
23#include <asm/atomic.h>
24#include <asm/irq.h>
25#include <asm/page.h>
26#include <asm/pgtable.h>
27#include <asm/sections.h>
28#include <asm/io.h>
29#include <asm/prom.h>
30#include <asm/smp.h>
31#include <asm/residual.h>
32#include <asm/time.h>
33#include <asm/open_pic.h>
34#include <asm/machdep.h>
35#include <asm/smp.h>
36#include <asm/mpic.h>
37
38extern unsigned long smp_chrp_cpu_nr;
39
40static int __init smp_chrp_probe(void)
41{
42 struct device_node *cpus = NULL;
43 unsigned int *reg;
44 int reglen;
45 int ncpus = 0;
46 int cpuid;
47 unsigned int phys;
48
49 /* Count CPUs in the device-tree */
50 cpuid = 1; /* the boot cpu is logical cpu 0 */
51 while ((cpus = of_find_node_by_type(cpus, "cpu")) != NULL) {
52 phys = ncpus;
53 reg = (unsigned int *) get_property(cpus, "reg", &reglen);
54 if (reg && reglen >= sizeof(unsigned int))
55 /* hmmm, not having a reg property would be bad */
56 phys = *reg;
57 if (phys != boot_cpuid_phys) {
58 set_hard_smp_processor_id(cpuid, phys);
59 ++cpuid;
60 }
61 ++ncpus;
62 }
63
64 printk(KERN_INFO "CHRP SMP probe found %d cpus\n", ncpus);
65
66 /* Nothing more to do if less than 2 of them */
67 if (ncpus <= 1)
68 return 1;
69
70 mpic_request_ipis();
71
72 return ncpus;
73}
74
75static void __devinit smp_chrp_kick_cpu(int nr)
76{
77 *(unsigned long *)KERNELBASE = nr;
78 asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
79}
80
81static void __devinit smp_chrp_setup_cpu(int cpu_nr)
82{
83 mpic_setup_this_cpu();
84}
85
86static DEFINE_SPINLOCK(timebase_lock);
87static unsigned int timebase_upper = 0, timebase_lower = 0;
88
89void __devinit smp_chrp_give_timebase(void)
90{
91 spin_lock(&timebase_lock);
92 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
93 timebase_upper = get_tbu();
94 timebase_lower = get_tbl();
95 spin_unlock(&timebase_lock);
96
97 while (timebase_upper || timebase_lower)
98 barrier();
99 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
100}
101
102void __devinit smp_chrp_take_timebase(void)
103{
104 while (!(timebase_upper || timebase_lower))
105 barrier();
106 spin_lock(&timebase_lock);
107 set_tb(timebase_upper, timebase_lower);
108 timebase_upper = 0;
109 timebase_lower = 0;
110 spin_unlock(&timebase_lock);
111 printk("CPU %i taken timebase\n", smp_processor_id());
112}
113
114/* CHRP with openpic */
115struct smp_ops_t chrp_smp_ops = {
116 .message_pass = smp_mpic_message_pass,
117 .probe = smp_chrp_probe,
118 .kick_cpu = smp_chrp_kick_cpu,
119 .setup_cpu = smp_chrp_setup_cpu,
120 .give_timebase = smp_chrp_give_timebase,
121 .take_timebase = smp_chrp_take_timebase,
122};
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
new file mode 100644
index 000000000000..9e53535ddb82
--- /dev/null
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -0,0 +1,188 @@
1/*
2 * arch/ppc/platforms/chrp_time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * Adapted for PowerPC (PReP) by Gary Thomas
7 * Modified by Cort Dougan (cort@cs.nmt.edu).
8 * Copied and modified from arch/i386/kernel/time.c
9 *
10 */
11#include <linux/errno.h>
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/param.h>
15#include <linux/string.h>
16#include <linux/mm.h>
17#include <linux/interrupt.h>
18#include <linux/time.h>
19#include <linux/timex.h>
20#include <linux/kernel_stat.h>
21#include <linux/mc146818rtc.h>
22#include <linux/init.h>
23#include <linux/bcd.h>
24
25#include <asm/io.h>
26#include <asm/nvram.h>
27#include <asm/prom.h>
28#include <asm/sections.h>
29#include <asm/time.h>
30
31extern spinlock_t rtc_lock;
32
33static int nvram_as1 = NVRAM_AS1;
34static int nvram_as0 = NVRAM_AS0;
35static int nvram_data = NVRAM_DATA;
36
37long __init chrp_time_init(void)
38{
39 struct device_node *rtcs;
40 int base;
41
42 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
43 if (rtcs == NULL)
44 rtcs = find_compatible_devices("rtc", "ds1385-rtc");
45 if (rtcs == NULL || rtcs->addrs == NULL)
46 return 0;
47 base = rtcs->addrs[0].address;
48 nvram_as1 = 0;
49 nvram_as0 = base;
50 nvram_data = base + 1;
51
52 return 0;
53}
54
55int chrp_cmos_clock_read(int addr)
56{
57 if (nvram_as1 != 0)
58 outb(addr>>8, nvram_as1);
59 outb(addr, nvram_as0);
60 return (inb(nvram_data));
61}
62
63void chrp_cmos_clock_write(unsigned long val, int addr)
64{
65 if (nvram_as1 != 0)
66 outb(addr>>8, nvram_as1);
67 outb(addr, nvram_as0);
68 outb(val, nvram_data);
69 return;
70}
71
72/*
73 * Set the hardware clock. -- Cort
74 */
75int chrp_set_rtc_time(struct rtc_time *tmarg)
76{
77 unsigned char save_control, save_freq_select;
78 struct rtc_time tm = *tmarg;
79
80 spin_lock(&rtc_lock);
81
82 save_control = chrp_cmos_clock_read(RTC_CONTROL); /* tell the clock it's being set */
83
84 chrp_cmos_clock_write((save_control|RTC_SET), RTC_CONTROL);
85
86 save_freq_select = chrp_cmos_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */
87
88 chrp_cmos_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
89
90 tm.tm_year -= 1900;
91 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
92 BIN_TO_BCD(tm.tm_sec);
93 BIN_TO_BCD(tm.tm_min);
94 BIN_TO_BCD(tm.tm_hour);
95 BIN_TO_BCD(tm.tm_mon);
96 BIN_TO_BCD(tm.tm_mday);
97 BIN_TO_BCD(tm.tm_year);
98 }
99 chrp_cmos_clock_write(tm.tm_sec,RTC_SECONDS);
100 chrp_cmos_clock_write(tm.tm_min,RTC_MINUTES);
101 chrp_cmos_clock_write(tm.tm_hour,RTC_HOURS);
102 chrp_cmos_clock_write(tm.tm_mon,RTC_MONTH);
103 chrp_cmos_clock_write(tm.tm_mday,RTC_DAY_OF_MONTH);
104 chrp_cmos_clock_write(tm.tm_year,RTC_YEAR);
105
106 /* The following flags have to be released exactly in this order,
107 * otherwise the DS12887 (popular MC146818A clone with integrated
108 * battery and quartz) will not reset the oscillator and will not
109 * update precisely 500 ms later. You won't find this mentioned in
110 * the Dallas Semiconductor data sheets, but who believes data
111 * sheets anyway ... -- Markus Kuhn
112 */
113 chrp_cmos_clock_write(save_control, RTC_CONTROL);
114 chrp_cmos_clock_write(save_freq_select, RTC_FREQ_SELECT);
115
116 spin_unlock(&rtc_lock);
117 return 0;
118}
119
120void chrp_get_rtc_time(struct rtc_time *tm)
121{
122 unsigned int year, mon, day, hour, min, sec;
123 int uip, i;
124
125 /* The Linux interpretation of the CMOS clock register contents:
126 * When the Update-In-Progress (UIP) flag goes from 1 to 0, the
127 * RTC registers show the second which has precisely just started.
128 * Let's hope other operating systems interpret the RTC the same way.
129 */
130
131 /* Since the UIP flag is set for about 2.2 ms and the clock
132 * is typically written with a precision of 1 jiffy, trying
133 * to obtain a precision better than a few milliseconds is
134 * an illusion. Only consistency is interesting, this also
135 * allows to use the routine for /dev/rtc without a potential
136 * 1 second kernel busy loop triggered by any reader of /dev/rtc.
137 */
138
139 for ( i = 0; i<1000000; i++) {
140 uip = chrp_cmos_clock_read(RTC_FREQ_SELECT);
141 sec = chrp_cmos_clock_read(RTC_SECONDS);
142 min = chrp_cmos_clock_read(RTC_MINUTES);
143 hour = chrp_cmos_clock_read(RTC_HOURS);
144 day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH);
145 mon = chrp_cmos_clock_read(RTC_MONTH);
146 year = chrp_cmos_clock_read(RTC_YEAR);
147 uip |= chrp_cmos_clock_read(RTC_FREQ_SELECT);
148 if ((uip & RTC_UIP)==0) break;
149 }
150
151 if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
152 BCD_TO_BIN(sec);
153 BCD_TO_BIN(min);
154 BCD_TO_BIN(hour);
155 BCD_TO_BIN(day);
156 BCD_TO_BIN(mon);
157 BCD_TO_BIN(year);
158 }
159 if ((year += 1900) < 1970)
160 year += 100;
161 tm->tm_sec = sec;
162 tm->tm_min = min;
163 tm->tm_hour = hour;
164 tm->tm_mday = day;
165 tm->tm_mon = mon;
166 tm->tm_year = year;
167}
168
169
170void __init chrp_calibrate_decr(void)
171{
172 struct device_node *cpu;
173 unsigned int freq, *fp;
174
175 /*
176 * The cpu node should have a timebase-frequency property
177 * to tell us the rate at which the decrementer counts.
178 */
179 freq = 16666000; /* hardcoded default */
180 cpu = find_type_devices("cpu");
181 if (cpu != 0) {
182 fp = (unsigned int *)
183 get_property(cpu, "timebase-frequency", NULL);
184 if (fp != 0)
185 freq = *fp;
186 }
187 ppc_tb_freq = freq;
188}
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
new file mode 100644
index 000000000000..81250090f98d
--- /dev/null
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -0,0 +1,318 @@
1choice
2 prompt "Machine Type"
3 depends on EMBEDDED6xx
4
5config KATANA
6 bool "Artesyn-Katana"
7 help
8 Select KATANA if configuring an Artesyn KATANA 750i or 3750
9 cPCI board.
10
11config WILLOW
12 bool "Cogent-Willow"
13
14config CPCI690
15 bool "Force-CPCI690"
16 help
17 Select CPCI690 if configuring a Force CPCI690 cPCI board.
18
19config POWERPMC250
20 bool "Force-PowerPMC250"
21
22config CHESTNUT
23 bool "IBM 750FX Eval board or 750GX Eval board"
24 help
25	  Select CHESTNUT if configuring an IBM 750FX Eval Board or an
26	  IBM 750GX Eval board.
27
28config SPRUCE
29 bool "IBM-Spruce"
30 select PPC_INDIRECT_PCI
31
32config HDPU
33 bool "Sky-HDPU"
34 help
35 Select HDPU if configuring a Sky Computers Compute Blade.
36
37config HDPU_FEATURES
38	depends on HDPU
39	tristate "HDPU-Features"
40	help
41	  Select to enable HDPU enhanced features.
42
43config EV64260
44 bool "Marvell-EV64260BP"
45 help
46 Select EV64260 if configuring a Marvell (formerly Galileo)
47 EV64260BP Evaluation platform.
48
49config LOPEC
50 bool "Motorola-LoPEC"
51 select PPC_I8259
52
53config MVME5100
54 bool "Motorola-MVME5100"
55 select PPC_INDIRECT_PCI
56
57config PPLUS
58 bool "Motorola-PowerPlus"
59 select PPC_I8259
60 select PPC_INDIRECT_PCI
61
62config PRPMC750
63 bool "Motorola-PrPMC750"
64 select PPC_INDIRECT_PCI
65
66config PRPMC800
67 bool "Motorola-PrPMC800"
68 select PPC_INDIRECT_PCI
69
70config SANDPOINT
71 bool "Motorola-Sandpoint"
72 select PPC_I8259
73 help
74 Select SANDPOINT if configuring for a Motorola Sandpoint X3
75 (any flavor).
76
77config RADSTONE_PPC7D
78 bool "Radstone Technology PPC7D board"
79 select PPC_I8259
80
81config PAL4
82 bool "SBS-Palomar4"
83
84config GEMINI
85 bool "Synergy-Gemini"
86 select PPC_INDIRECT_PCI
87 depends on BROKEN
88 help
89 Select Gemini if configuring for a Synergy Microsystems' Gemini
90 series Single Board Computer. More information is available at:
91 <http://www.synergymicro.com/PressRel/97_10_15.html>.
92
93config EST8260
94 bool "EST8260"
95 ---help---
96 The EST8260 is a single-board computer manufactured by Wind River
97 Systems, Inc. (formerly Embedded Support Tools Corp.) and based on
98 the MPC8260. Wind River Systems has a website at
99 <http://www.windriver.com/>, but the EST8260 cannot be found on it
100 and has probably been discontinued or rebadged.
101
102config SBC82xx
103 bool "SBC82xx"
104 ---help---
105 SBC PowerQUICC II, single-board computer with MPC82xx CPU
106 Manufacturer: Wind River Systems, Inc.
107 Date of Release: May 2003
108 End of Life: -
109 URL: <http://www.windriver.com/>
110
111config SBS8260
112 bool "SBS8260"
113
114config RPX8260
115 bool "RPXSUPER"
116
117config TQM8260
118 bool "TQM8260"
119 ---help---
120	  MPC8260 based module, a little larger than a credit card,
121	  up to 128 MB global + 64 MB local RAM, 32 MB Flash,
122	  32 kB EEPROM, 256 kB L2 Cache, 10baseT + 100baseT Ethernet,
123	  2 x serial ports, ...
124 Manufacturer: TQ Components, www.tq-group.de
125 Date of Release: June 2001
126 End of Life: not yet :-)
127 URL: <http://www.denx.de/PDF/TQM82xx_SPEC_Rev005.pdf>
128
129config ADS8272
130 bool "ADS8272"
131
132config PQ2FADS
133 bool "Freescale-PQ2FADS"
134 help
135 Select PQ2FADS if you wish to configure for a Freescale
136 PQ2FADS board (-VR or -ZU).
137
138config LITE5200
139 bool "Freescale LITE5200 / (IceCube)"
140 select PPC_MPC52xx
141 help
142 Support for the LITE5200 dev board for the MPC5200 from Freescale.
143 This is for the LITE5200 version 2.0 board. Don't know if it changes
144 much but it's only been tested on this board version. I think this
145 board is also known as IceCube.
146
147config MPC834x_SYS
148 bool "Freescale MPC834x SYS"
149 help
150 This option enables support for the MPC 834x SYS evaluation board.
151
152	  Be aware that PCI buses can only function when the SYS board is plugged
153	  into the PIB (Platform IO Board) from Freescale, which provides
154	  3 PCI slots. The PIB's PCI initialization is the bootloader's
155	  responsibility.
156
157config EV64360
158 bool "Marvell-EV64360BP"
159 help
160 Select EV64360 if configuring a Marvell EV64360BP Evaluation
161 platform.
162endchoice
163
164config PQ2ADS
165 bool
166 depends on ADS8272
167 default y
168
169config TQM8xxL
170 bool
171 depends on 8xx && (TQM823L || TQM850L || FPS850L || TQM855L || TQM860L)
172 default y
173
174config PPC_MPC52xx
175 bool
176
177config 8260
178 bool "CPM2 Support" if WILLOW
179 depends on 6xx
180 default y if TQM8260 || RPX8260 || EST8260 || SBS8260 || SBC82xx || PQ2FADS
181 help
182 The MPC8260 is a typical embedded CPU made by Motorola. Selecting
183 this option means that you wish to build a kernel for a machine with
184 an 8260 class CPU.
185
186config 8272
187 bool
188 depends on 6xx
189 default y if ADS8272
190 select 8260
191 help
192 The MPC8272 CPM has a different internal dpram setup than other CPM2
193 devices
194
195config 83xx
196 bool
197 default y if MPC834x_SYS
198
199config MPC834x
200 bool
201 default y if MPC834x_SYS
202
203config CPM2
204 bool
205 depends on 8260 || MPC8560 || MPC8555
206 default y
207 help
208 The CPM2 (Communications Processor Module) is a coprocessor on
209 embedded CPUs made by Motorola. Selecting this option means that
210 you wish to build a kernel for a machine with a CPM2 coprocessor
211 on it (826x, 827x, 8560).
212
213config PPC_GEN550
214 bool
215 depends on SANDPOINT || SPRUCE || PPLUS || \
216 PRPMC750 || PRPMC800 || LOPEC || \
217 (EV64260 && !SERIAL_MPSC) || CHESTNUT || RADSTONE_PPC7D || \
218 83xx
219 default y
220
221config FORCE
222 bool
223 depends on 6xx && POWERPMC250
224 default y
225
226config GT64260
227 bool
228 depends on EV64260 || CPCI690
229 default y
230
231config MV64360 # Really MV64360 & MV64460
232 bool
233 depends on CHESTNUT || KATANA || RADSTONE_PPC7D || HDPU || EV64360
234 default y
235
236config MV64X60
237 bool
238 depends on (GT64260 || MV64360)
239 select PPC_INDIRECT_PCI
240 default y
241
242menu "Set bridge options"
243 depends on MV64X60
244
245config NOT_COHERENT_CACHE
246 bool "Turn off Cache Coherency"
247 default n
248 help
249 Some 64x60 bridges lock up when trying to enforce cache coherency.
250 When this option is selected, cache coherency will be turned off.
251 Note that this can cause other problems (e.g., stale data being
252 speculatively loaded via a cached mapping). Use at your own risk.
253
254config MV64X60_BASE
255 hex "Set bridge base used by firmware"
256 default "0xf1000000"
257 help
258 A firmware can leave the base address of the bridge's registers at
259 a non-standard location. If so, set this value to reflect the
260 address of that non-standard location.
261
262config MV64X60_NEW_BASE
263 hex "Set bridge base used by kernel"
264 default "0xf1000000"
265 help
266 If the current base address of the bridge's registers is not where
267 you want it, set this value to the address that you want it moved to.
268
269endmenu
270
271config NONMONARCH_SUPPORT
272 bool "Enable Non-Monarch Support"
273 depends on PRPMC800
274
275config HARRIER
276 bool
277 depends on PRPMC800
278 default y
279
280config EPIC_SERIAL_MODE
281 bool
282 depends on 6xx && (LOPEC || SANDPOINT)
283 default y
284
285config MPC10X_BRIDGE
286 bool
287 depends on POWERPMC250 || LOPEC || SANDPOINT
288 select PPC_INDIRECT_PCI
289 default y
290
291config MPC10X_OPENPIC
292 bool
293 depends on POWERPMC250 || LOPEC || SANDPOINT
294 default y
295
296config MPC10X_STORE_GATHERING
297 bool "Enable MPC10x store gathering"
298 depends on MPC10X_BRIDGE
299
300config SANDPOINT_ENABLE_UART1
301 bool "Enable DUART mode on Sandpoint"
302 depends on SANDPOINT
303 help
304 If this option is enabled then the MPC824x processor will run
305 in DUART mode instead of UART mode.
306
307config HARRIER_STORE_GATHERING
308 bool "Enable Harrier store gathering"
309 depends on HARRIER
310
311config MVME5100_IPMC761_PRESENT
312 bool "MVME5100 configured with an IPMC761"
313 depends on MVME5100
314 select PPC_I8259
315
316config SPRUCE_BAUD_33M
317 bool "Spruce baud clock support"
318 depends on SPRUCE
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644
index 000000000000..3d957a30c8c2
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -0,0 +1,31 @@
1
2menu "iSeries device drivers"
3 depends on PPC_ISERIES
4
5config VIOCONS
6 tristate "iSeries Virtual Console Support"
7
8config VIODASD
9 tristate "iSeries Virtual I/O disk support"
10 help
11 If you are running on an iSeries system and you want to use
12 virtual disks created and managed by OS/400, say Y.
13
14config VIOCD
15 tristate "iSeries Virtual I/O CD support"
16 help
17 If you are running Linux on an IBM iSeries system and you want to
18 read a CD drive owned by OS/400, say Y here.
19
20config VIOTAPE
21 tristate "iSeries Virtual Tape Support"
22 help
23 If you are running Linux on an iSeries system and you want Linux
24 to read and/or write a tape drive owned by OS/400, say Y here.
25
26endmenu
27
28config VIOPATH
29 bool
30 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
31 default y
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644
index 000000000000..127b465308be
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -0,0 +1,9 @@
1EXTRA_CFLAGS += -mno-minimal-toc
2
3obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
4 hvcall.o proc.o htab.o iommu.o misc.o
5obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
6obj-$(CONFIG_IBMVIO) += vio.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
new file mode 100644
index 000000000000..321f3bb7a8f5
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_hpt.h
@@ -0,0 +1,101 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
19#define _PLATFORMS_ISERIES_CALL_HPT_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iSeries/HvCallSc.h>
27#include <asm/iSeries/HvTypes.h>
28#include <asm/mmu.h>
29
30#define HvCallHptGetHptAddress HvCallHpt + 0
31#define HvCallHptGetHptPages HvCallHpt + 1
32#define HvCallHptSetPp HvCallHpt + 5
33#define HvCallHptSetSwBits HvCallHpt + 6
34#define HvCallHptUpdate HvCallHpt + 7
35#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
36#define HvCallHptGet HvCallHpt + 11
37#define HvCallHptFindNextValid HvCallHpt + 12
38#define HvCallHptFindValid HvCallHpt + 13
39#define HvCallHptAddValidate HvCallHpt + 16
40#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
41
42
43static inline u64 HvCallHpt_getHptAddress(void)
44{
45 return HvCall0(HvCallHptGetHptAddress);
46}
47
48static inline u64 HvCallHpt_getHptPages(void)
49{
50 return HvCall0(HvCallHptGetHptPages);
51}
52
53static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
54{
55 HvCall2(HvCallHptSetPp, hpteIndex, value);
56}
57
58static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
59{
60 HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
61}
62
63static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
64{
65 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
66}
67
68static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
69 u8 bitsoff)
70{
71 u64 compressedStatus;
72
73 compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
74 hpteIndex, bitson, bitsoff, 1);
75 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
76 return compressedStatus;
77}
78
79static inline u64 HvCallHpt_findValid(hpte_t *hpte, u64 vpn)
80{
81 return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
82}
83
84static inline u64 HvCallHpt_findNextValid(hpte_t *hpte, u32 hpteIndex,
85 u8 bitson, u8 bitsoff)
86{
87 return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
88 bitson, bitsoff);
89}
90
91static inline void HvCallHpt_get(hpte_t *hpte, u32 hpteIndex)
92{
93 HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
94}
95
96static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit, hpte_t *hpte)
97{
98 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
99}
100
101#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
new file mode 100644
index 000000000000..a86e065b9577
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_pci.h
@@ -0,0 +1,290 @@
1/*
2 * Provides the Hypervisor PCI calls for iSeries Linux Partition.
3 * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the:
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330,
19 * Boston, MA 02111-1307 USA
20 *
21 * Change Activity:
22 * Created, Jan 9, 2001
23 */
24
25#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
26#define _PLATFORMS_ISERIES_CALL_PCI_H
27
28#include <asm/iSeries/HvCallSc.h>
29#include <asm/iSeries/HvTypes.h>
30
31/*
32 * DSA == Direct Select Address
33 * this struct must be 64 bits in total
34 */
35struct HvCallPci_DsaAddr {
36 u16 busNumber; /* PHB index? */
37 u8 subBusNumber; /* PCI bus number? */
38 u8 deviceId; /* device and function? */
39 u8 barNumber;
40 u8 reserved[3];
41};
42
43union HvDsaMap {
44 u64 DsaAddr;
45 struct HvCallPci_DsaAddr Dsa;
46};
47
48struct HvCallPci_LoadReturn {
49 u64 rc;
50 u64 value;
51};
52
53enum HvCallPci_DeviceType {
54 HvCallPci_NodeDevice = 1,
55 HvCallPci_SpDevice = 2,
56 HvCallPci_IopDevice = 3,
57 HvCallPci_BridgeDevice = 4,
58 HvCallPci_MultiFunctionDevice = 5,
59 HvCallPci_IoaDevice = 6
60};
61
62
63struct HvCallPci_DeviceInfo {
64 u32 deviceType; /* See DeviceType enum for values */
65};
66
67struct HvCallPci_BusUnitInfo {
68 u32 sizeReturned; /* length of data returned */
69 u32 deviceType; /* see DeviceType enum for values */
70};
71
72struct HvCallPci_BridgeInfo {
73 struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
74 u8 subBusNumber; /* Bus number of secondary bus */
75 u8 maxAgents; /* Max idsels on secondary bus */
76 u8 maxSubBusNumber; /* Max Sub Bus */
77 u8 logicalSlotNumber; /* Logical Slot Number for IOA */
78};
79
80
81/*
82 * Maximum BusUnitInfo buffer size. Provided for clients so
83 * they can allocate a buffer big enough for any type of bus
84 * unit. Increase as needed.
85 */
86enum {HvCallPci_MaxBusUnitInfoSize = 128};
87
88struct HvCallPci_BarParms {
89 u64 vaddr;
90 u64 raddr;
91 u64 size;
92 u64 protectStart;
93 u64 protectEnd;
94 u64 relocationOffset;
95 u64 pciAddress;
96 u64 reserved[3];
97};
98
99enum HvCallPci_VpdType {
100 HvCallPci_BusVpd = 1,
101 HvCallPci_BusAdapterVpd = 2
102};
103
104#define HvCallPciConfigLoad8 HvCallPci + 0
105#define HvCallPciConfigLoad16 HvCallPci + 1
106#define HvCallPciConfigLoad32 HvCallPci + 2
107#define HvCallPciConfigStore8 HvCallPci + 3
108#define HvCallPciConfigStore16 HvCallPci + 4
109#define HvCallPciConfigStore32 HvCallPci + 5
110#define HvCallPciEoi HvCallPci + 16
111#define HvCallPciGetBarParms HvCallPci + 18
112#define HvCallPciMaskFisr HvCallPci + 20
113#define HvCallPciUnmaskFisr HvCallPci + 21
114#define HvCallPciSetSlotReset HvCallPci + 25
115#define HvCallPciGetDeviceInfo HvCallPci + 27
116#define HvCallPciGetCardVpd HvCallPci + 28
117#define HvCallPciBarLoad8 HvCallPci + 40
118#define HvCallPciBarLoad16 HvCallPci + 41
119#define HvCallPciBarLoad32 HvCallPci + 42
120#define HvCallPciBarLoad64 HvCallPci + 43
121#define HvCallPciBarStore8 HvCallPci + 44
122#define HvCallPciBarStore16 HvCallPci + 45
123#define HvCallPciBarStore32 HvCallPci + 46
124#define HvCallPciBarStore64 HvCallPci + 47
125#define HvCallPciMaskInterrupts HvCallPci + 48
126#define HvCallPciUnmaskInterrupts HvCallPci + 49
127#define HvCallPciGetBusUnitInfo HvCallPci + 50
128
129static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
130 u8 deviceId, u32 offset, u16 *value)
131{
132 struct HvCallPci_DsaAddr dsa;
133 struct HvCallPci_LoadReturn retVal;
134
135 *((u64*)&dsa) = 0;
136
137 dsa.busNumber = busNumber;
138 dsa.subBusNumber = subBusNumber;
139 dsa.deviceId = deviceId;
140
141 HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
142
143 *value = retVal.value;
144
145 return retVal.rc;
146}
147
148static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
149 u8 deviceId, u32 offset, u8 value)
150{
151 struct HvCallPci_DsaAddr dsa;
152
153 *((u64*)&dsa) = 0;
154
155 dsa.busNumber = busNumber;
156 dsa.subBusNumber = subBusNumber;
157 dsa.deviceId = deviceId;
158
159 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
160}
161
162static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
163 u8 deviceIdParm)
164{
165 struct HvCallPci_DsaAddr dsa;
166 struct HvCallPci_LoadReturn retVal;
167
168 *((u64*)&dsa) = 0;
169
170 dsa.busNumber = busNumberParm;
171 dsa.subBusNumber = subBusParm;
172 dsa.deviceId = deviceIdParm;
173
174 HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
175
176 return retVal.rc;
177}
178
179static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
180 u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
181{
182 struct HvCallPci_DsaAddr dsa;
183
184 *((u64*)&dsa) = 0;
185
186 dsa.busNumber = busNumberParm;
187 dsa.subBusNumber = subBusParm;
188 dsa.deviceId = deviceIdParm;
189 dsa.barNumber = barNumberParm;
190
191 return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
192}
193
194static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
195 u8 deviceIdParm, u64 fisrMask)
196{
197 struct HvCallPci_DsaAddr dsa;
198
199 *((u64*)&dsa) = 0;
200
201 dsa.busNumber = busNumberParm;
202 dsa.subBusNumber = subBusParm;
203 dsa.deviceId = deviceIdParm;
204
205 return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
206}
207
208static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
209 u8 deviceIdParm, u64 fisrMask)
210{
211 struct HvCallPci_DsaAddr dsa;
212
213 *((u64*)&dsa) = 0;
214
215 dsa.busNumber = busNumberParm;
216 dsa.subBusNumber = subBusParm;
217 dsa.deviceId = deviceIdParm;
218
219 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
220}
221
222static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
223 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
224{
225 struct HvCallPci_DsaAddr dsa;
226
227 *((u64*)&dsa) = 0;
228
229 dsa.busNumber = busNumberParm;
230 dsa.subBusNumber = subBusParm;
231 dsa.deviceId = deviceNumberParm << 4;
232
233 return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
234}
235
236static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
237 u8 deviceIdParm, u64 interruptMask)
238{
239 struct HvCallPci_DsaAddr dsa;
240
241 *((u64*)&dsa) = 0;
242
243 dsa.busNumber = busNumberParm;
244 dsa.subBusNumber = subBusParm;
245 dsa.deviceId = deviceIdParm;
246
247 return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
248}
249
250static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
251 u8 deviceIdParm, u64 interruptMask)
252{
253 struct HvCallPci_DsaAddr dsa;
254
255 *((u64*)&dsa) = 0;
256
257 dsa.busNumber = busNumberParm;
258 dsa.subBusNumber = subBusParm;
259 dsa.deviceId = deviceIdParm;
260
261 return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
262}
263
264static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
265 u8 deviceIdParm, u64 parms, u32 sizeofParms)
266{
267 struct HvCallPci_DsaAddr dsa;
268
269 *((u64*)&dsa) = 0;
270
271 dsa.busNumber = busNumberParm;
272 dsa.subBusNumber = subBusParm;
273 dsa.deviceId = deviceIdParm;
274
275 return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
276 sizeofParms);
277}
278
279static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
280 u16 sizeParm)
281{
282 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
283 sizeParm, HvCallPci_BusVpd);
284 if (xRc == -1)
285 return -1;
286 else
287 return xRc & 0xFFFF;
288}
289
290#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h
new file mode 100644
index 000000000000..ef223166cf22
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_sm.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_CALL_SM_H
19#define _ISERIES_CALL_SM_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iSeries/HvCallSc.h>
27#include <asm/iSeries/HvTypes.h>
28
29#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
30
/*
 * Return 64 bits of the access map for partition lpIndex, selected by
 * indexIntoBitMap, via the HvCallSmGet64BitsOfAccessMap hypervisor call
 * (thin wrapper around HvCall2; semantics of the map bits are defined by
 * the hypervisor, not visible here).
 */
31static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
32		u64 indexIntoBitMap)
33{
34	return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
35}
36
37#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/powerpc/platforms/iseries/htab.c
index 073b76661747..b3c6c3374ca6 100644
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -1,10 +1,10 @@
1/* 1/*
2 * iSeries hashtable management. 2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c 3 * Derived from pSeries_htab.c
4 * 4 *
5 * SMP scalability work: 5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM 6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
@@ -14,11 +14,13 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/mmu.h> 15#include <asm/mmu.h>
16#include <asm/mmu_context.h> 16#include <asm/mmu_context.h>
17#include <asm/iSeries/HvCallHpt.h>
18#include <asm/abs_addr.h> 17#include <asm/abs_addr.h>
19#include <linux/spinlock.h> 18#include <linux/spinlock.h>
20 19
21static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED}; 20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp =
23 { [0 ... 63] = SPIN_LOCK_UNLOCKED};
22 24
23/* 25/*
24 * Very primitive algorithm for picking up a lock 26 * Very primitive algorithm for picking up a lock
@@ -84,6 +86,25 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
84 return (secondary << 3) | (slot & 7); 86 return (secondary << 3) | (slot & 7);
85} 87}
86 88
89long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
90 unsigned long va, unsigned long prpn, unsigned long vflags,
91 unsigned long rflags)
92{
93 long slot;
94 hpte_t lhpte;
95
96 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
97
98 if (lhpte.v & HPTE_V_VALID) {
99 /* Bolt the existing HPTE */
100 HvCallHpt_setSwBits(slot, 0x10, 0);
101 HvCallHpt_setPp(slot, PP_RWXX);
102 return 0;
103 }
104
105 return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
106}
107
87static unsigned long iSeries_hpte_getword0(unsigned long slot) 108static unsigned long iSeries_hpte_getword0(unsigned long slot)
88{ 109{
89 hpte_t hpte; 110 hpte_t hpte;
@@ -107,7 +128,7 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
107 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset); 128 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
108 129
109 if (! (hpte_v & HPTE_V_BOLTED)) { 130 if (! (hpte_v & HPTE_V_BOLTED)) {
110 HvCallHpt_invalidateSetSwBitsGet(hpte_group + 131 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
111 slot_offset, 0, 0); 132 slot_offset, 0, 0);
112 iSeries_hunlock(hpte_group); 133 iSeries_hunlock(hpte_group);
113 return i; 134 return i;
@@ -124,9 +145,9 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
124 145
125/* 146/*
126 * The HyperVisor expects the "flags" argument in this form: 147 * The HyperVisor expects the "flags" argument in this form:
127 * bits 0..59 : reserved 148 * bits 0..59 : reserved
128 * bit 60 : N 149 * bit 60 : N
129 * bits 61..63 : PP2,PP1,PP0 150 * bits 61..63 : PP2,PP1,PP0
130 */ 151 */
131static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp, 152static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
132 unsigned long va, int large, int local) 153 unsigned long va, int large, int local)
@@ -152,7 +173,7 @@ static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
152} 173}
153 174
154/* 175/*
155 * Functions used to find the PTE for a particular virtual address. 176 * Functions used to find the PTE for a particular virtual address.
156 * Only used during boot when bolting pages. 177 * Only used during boot when bolting pages.
157 * 178 *
158 * Input : vpn : virtual page number 179 * Input : vpn : virtual page number
@@ -170,7 +191,7 @@ static long iSeries_hpte_find(unsigned long vpn)
170 * 0x00000000xxxxxxxx : Entry found in primary group, slot x 191 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
171 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x 192 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
172 */ 193 */
173 slot = HvCallHpt_findValid(&hpte, vpn); 194 slot = HvCallHpt_findValid(&hpte, vpn);
174 if (hpte.v & HPTE_V_VALID) { 195 if (hpte.v & HPTE_V_VALID) {
175 if (slot < 0) { 196 if (slot < 0) {
176 slot &= 0x7fffffffffffffff; 197 slot &= 0x7fffffffffffffff;
@@ -197,7 +218,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
197 vsid = get_kernel_vsid(ea); 218 vsid = get_kernel_vsid(ea);
198 va = (vsid << 28) | (ea & 0x0fffffff); 219 va = (vsid << 28) | (ea & 0x0fffffff);
199 vpn = va >> PAGE_SHIFT; 220 vpn = va >> PAGE_SHIFT;
200 slot = iSeries_hpte_find(vpn); 221 slot = iSeries_hpte_find(vpn);
201 if (slot == -1) 222 if (slot == -1)
202 panic("updateboltedpp: Could not find page to bolt\n"); 223 panic("updateboltedpp: Could not find page to bolt\n");
203 HvCallHpt_setPp(slot, newpp); 224 HvCallHpt_setPp(slot, newpp);
@@ -215,7 +236,7 @@ static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
215 iSeries_hlock(slot); 236 iSeries_hlock(slot);
216 237
217 hpte_v = iSeries_hpte_getword0(slot); 238 hpte_v = iSeries_hpte_getword0(slot);
218 239
219 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID)) 240 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
220 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0); 241 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
221 242
@@ -230,7 +251,7 @@ void hpte_init_iSeries(void)
230 ppc_md.hpte_updatepp = iSeries_hpte_updatepp; 251 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
231 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp; 252 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
232 ppc_md.hpte_insert = iSeries_hpte_insert; 253 ppc_md.hpte_insert = iSeries_hpte_insert;
233 ppc_md.hpte_remove = iSeries_hpte_remove; 254 ppc_md.hpte_remove = iSeries_hpte_remove;
234 255
235 htab_finish_init(); 256 htab_finish_init();
236} 257}
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/powerpc/platforms/iseries/hvcall.S
index 4c699eab1b95..07ae6ad5f49f 100644
--- a/arch/ppc64/kernel/hvCall.S
+++ b/arch/powerpc/platforms/iseries/hvcall.S
@@ -1,7 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/hvCall.S
3 *
4 *
5 * This file contains the code to perform calls to the 2 * This file contains the code to perform calls to the
6 * iSeries LPAR hypervisor 3 * iSeries LPAR hypervisor
7 * 4 *
@@ -13,15 +10,16 @@
13 10
14#include <asm/ppc_asm.h> 11#include <asm/ppc_asm.h>
15#include <asm/processor.h> 12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
16 14
17 .text 15 .text
18 16
19/* 17/*
20 * Hypervisor call 18 * Hypervisor call
21 * 19 *
22 * Invoke the iSeries hypervisor via the System Call instruction 20 * Invoke the iSeries hypervisor via the System Call instruction
23 * Parameters are passed to this routine in registers r3 - r10 21 * Parameters are passed to this routine in registers r3 - r10
24 * 22 *
25 * r3 contains the HV function to be called 23 * r3 contains the HV function to be called
26 * r4-r10 contain the operands to the hypervisor function 24 * r4-r10 contain the operands to the hypervisor function
27 * 25 *
@@ -41,11 +39,11 @@ _GLOBAL(HvCall7)
41 mfcr r0 39 mfcr r0
42 std r0,-8(r1) 40 std r0,-8(r1)
43 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1) 41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
44 42
45 /* r0 = 0xffffffffffffffff indicates a hypervisor call */ 43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
46 44
47 li r0,-1 45 li r0,-1
48 46
49 /* Invoke the hypervisor */ 47 /* Invoke the hypervisor */
50 48
51 sc 49 sc
@@ -55,7 +53,7 @@ _GLOBAL(HvCall7)
55 mtcrf 0xff,r0 53 mtcrf 0xff,r0
56 54
57 /* return to caller, return value in r3 */ 55 /* return to caller, return value in r3 */
58 56
59 blr 57 blr
60 58
61_GLOBAL(HvCall0Ret16) 59_GLOBAL(HvCall0Ret16)
@@ -92,7 +90,5 @@ _GLOBAL(HvCall7Ret16)
92 ld r0,-8(r1) 90 ld r0,-8(r1)
93 mtcrf 0xff,r0 91 mtcrf 0xff,r0
94 ld r31,-16(r1) 92 ld r31,-16(r1)
95
96 blr
97
98 93
94 blr
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/powerpc/platforms/iseries/hvlog.c
index b772e65b57a2..f61e2e9ac9ec 100644
--- a/arch/ppc64/kernel/HvCall.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvCall.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
index cb1d6473203c..dc28621aea0d 100644
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ b/arch/powerpc/platforms/iseries/hvlpconfig.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * HvLpConfig.c
3 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index f8ff1bb054dc..1db26d8be640 100644
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/ppc64/kernel/iSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 * 3 *
6 * Rewrite, cleanup: 4 * Rewrite, cleanup:
@@ -30,9 +28,11 @@
30#include <linux/list.h> 28#include <linux/list.h>
31 29
32#include <asm/iommu.h> 30#include <asm/iommu.h>
31#include <asm/tce.h>
33#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/abs_addr.h>
34#include <asm/pci-bridge.h>
34#include <asm/iSeries/HvCallXm.h> 35#include <asm/iSeries/HvCallXm.h>
35#include <asm/iSeries/iSeries_pci.h>
36 36
37extern struct list_head iSeries_Global_Device_List; 37extern struct list_head iSeries_Global_Device_List;
38 38
@@ -90,15 +90,16 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
90 */ 90 */
91static struct iommu_table *iommu_table_find(struct iommu_table * tbl) 91static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
92{ 92{
93 struct iSeries_Device_Node *dp; 93 struct pci_dn *pdn;
94 94
95 list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) { 95 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
96 if ((dp->iommu_table != NULL) && 96 struct iommu_table *it = pdn->iommu_table;
97 (dp->iommu_table->it_type == TCE_PCI) && 97 if ((it != NULL) &&
98 (dp->iommu_table->it_offset == tbl->it_offset) && 98 (it->it_type == TCE_PCI) &&
99 (dp->iommu_table->it_index == tbl->it_index) && 99 (it->it_offset == tbl->it_offset) &&
100 (dp->iommu_table->it_size == tbl->it_size)) 100 (it->it_index == tbl->it_index) &&
101 return dp->iommu_table; 101 (it->it_size == tbl->it_size))
102 return it;
102 } 103 }
103 return NULL; 104 return NULL;
104} 105}
@@ -112,7 +113,7 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
112 * 2. TCE table per Bus. 113 * 2. TCE table per Bus.
113 * 3. TCE Table per IOA. 114 * 3. TCE Table per IOA.
114 */ 115 */
115static void iommu_table_getparms(struct iSeries_Device_Node* dn, 116static void iommu_table_getparms(struct pci_dn *pdn,
116 struct iommu_table* tbl) 117 struct iommu_table* tbl)
117{ 118{
118 struct iommu_table_cb *parms; 119 struct iommu_table_cb *parms;
@@ -123,11 +124,11 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
123 124
124 memset(parms, 0, sizeof(*parms)); 125 memset(parms, 0, sizeof(*parms));
125 126
126 parms->itc_busno = ISERIES_BUS(dn); 127 parms->itc_busno = pdn->busno;
127 parms->itc_slotno = dn->LogicalSlot; 128 parms->itc_slotno = pdn->LogicalSlot;
128 parms->itc_virtbus = 0; 129 parms->itc_virtbus = 0;
129 130
130 HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms)); 131 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
131 132
132 if (parms->itc_size == 0) 133 if (parms->itc_size == 0)
133 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); 134 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
@@ -144,18 +145,19 @@ static void iommu_table_getparms(struct iSeries_Device_Node* dn,
144} 145}
145 146
146 147
147void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn) 148void iommu_devnode_init_iSeries(struct device_node *dn)
148{ 149{
149 struct iommu_table *tbl; 150 struct iommu_table *tbl;
151 struct pci_dn *pdn = PCI_DN(dn);
150 152
151 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL); 153 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
152 154
153 iommu_table_getparms(dn, tbl); 155 iommu_table_getparms(pdn, tbl);
154 156
155 /* Look for existing tce table */ 157 /* Look for existing tce table */
156 dn->iommu_table = iommu_table_find(tbl); 158 pdn->iommu_table = iommu_table_find(tbl);
157 if (dn->iommu_table == NULL) 159 if (pdn->iommu_table == NULL)
158 dn->iommu_table = iommu_init_table(tbl); 160 pdn->iommu_table = iommu_init_table(tbl);
159 else 161 else
160 kfree(tbl); 162 kfree(tbl);
161} 163}
diff --git a/arch/powerpc/platforms/iseries/ipl_parms.h b/arch/powerpc/platforms/iseries/ipl_parms.h
new file mode 100644
index 000000000000..77c135ddbf1b
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ipl_parms.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_IPL_PARMS_H
19#define _ISERIES_IPL_PARMS_H
20
21/*
22 * This struct maps the IPL Parameters DMA'd from the SP.
23 *
24 * Warning:
25 * This data must map in exactly 64 bytes and match the architecture for
26 * the IPL parms
27 */
28
29#include <asm/types.h>
30
31struct ItIplParmsReal {
32 u8 xFormat; // Defines format of IplParms x00-x00
33 u8 xRsvd01:6; // Reserved x01-x01
34 u8 xAlternateSearch:1; // Alternate search indicator ...
35 u8 xUaSupplied:1; // UA Supplied on programmed IPL...
36 u8 xLsUaFormat; // Format byte for UA x02-x02
37 u8 xRsvd02; // Reserved x03-x03
38 u32 xLsUa; // LS UA x04-x07
39 u32 xUnusedLsLid; // First OS LID to load x08-x0B
40 u16 xLsBusNumber; // LS Bus Number x0C-x0D
41 u8 xLsCardAdr; // LS Card Address x0E-x0E
42 u8 xLsBoardAdr; // LS Board Address x0F-x0F
43 u32 xRsvd03; // Reserved x10-x13
44 u8 xSpcnPresent:1; // SPCN present x14-x14
45 u8 xCpmPresent:1; // CPM present ...
46 u8 xRsvd04:6; // Reserved ...
47 u8 xRsvd05:4; // Reserved x15-x15
48 u8 xKeyLock:4; // Keylock setting ...
49 u8 xRsvd06:6; // Reserved x16-x16
50 u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
51 u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
52 u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiatedx18-x19
53 u16 xPowerOnResetIpl:1; // Indicate POR condition ...
54 u16 xMainStorePreserved:1; // Main Storage is preserved ...
55 u16 xRsvd07:13; // Reserved ...
56 u16 xIplSource:16; // Ipl source x1A-x1B
57 u8 xIplReason:8; // Reason for this IPL x1C-x1C
58 u8 xRsvd08; // Reserved x1D-x1D
59 u16 xRsvd09; // Reserved x1E-x1F
60 u16 xSysBoxType; // System Box Type x20-x21
61 u16 xSysProcType; // System Processor Type x22-x23
62 u32 xRsvd10; // Reserved x24-x27
63 u64 xRsvd11; // Reserved x28-x2F
64 u64 xRsvd12; // Reserved x30-x37
65 u64 xRsvd13; // Reserved x38-x3F
66};
67
68extern struct ItIplParmsReal xItIplParmsReal;
69
70#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/powerpc/platforms/iseries/irq.c
index 77376c1bd611..937ac99b9d33 100644
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -38,9 +38,10 @@
38#include <asm/ppcdebug.h> 38#include <asm/ppcdebug.h>
39#include <asm/iSeries/HvTypes.h> 39#include <asm/iSeries/HvTypes.h>
40#include <asm/iSeries/HvLpEvent.h> 40#include <asm/iSeries/HvLpEvent.h>
41#include <asm/iSeries/HvCallPci.h>
42#include <asm/iSeries/HvCallXm.h> 41#include <asm/iSeries/HvCallXm.h>
43#include <asm/iSeries/iSeries_irq.h> 42
43#include "irq.h"
44#include "call_pci.h"
44 45
45/* This maps virtual irq numbers to real irqs */ 46/* This maps virtual irq numbers to real irqs */
46unsigned int virt_irq_to_real_map[NR_IRQS]; 47unsigned int virt_irq_to_real_map[NR_IRQS];
@@ -351,3 +352,15 @@ int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
351 irq_desc[virtirq].handler = &iSeries_IRQ_handler; 352 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
352 return virtirq; 353 return virtirq;
353} 354}
355
356int virt_irq_create_mapping(unsigned int real_irq)
357{
358 BUG(); /* Don't call this on iSeries, yet */
359
360 return 0;
361}
362
363void virt_irq_init(void)
364{
365 return;
366}
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
new file mode 100644
index 000000000000..5f643f16ecc0
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -0,0 +1,8 @@
1#ifndef _ISERIES_IRQ_H
2#define _ISERIES_IRQ_H
3
4extern void iSeries_init_IRQ(void);
5extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId);
6extern void iSeries_activate_IRQs(void);
7
8#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644
index 000000000000..f271b3539721
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -0,0 +1,27 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iSeries/HvCallSc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
22
23#ifdef CONFIG_SMP
24EXPORT_SYMBOL(local_get_flags);
25EXPORT_SYMBOL(local_irq_disable);
26EXPORT_SYMBOL(local_irq_restore);
27#endif
diff --git a/arch/ppc64/kernel/LparData.c b/arch/powerpc/platforms/iseries/lpardata.c
index 0a9c23ca2f0c..ed2ffee6f731 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp 2 * Copyright 2001 Mike Corrigan, IBM Corp
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
@@ -19,18 +19,18 @@
19#include <asm/lppaca.h> 19#include <asm/lppaca.h>
20#include <asm/iSeries/ItLpRegSave.h> 20#include <asm/iSeries/ItLpRegSave.h>
21#include <asm/paca.h> 21#include <asm/paca.h>
22#include <asm/iSeries/HvReleaseData.h>
23#include <asm/iSeries/LparMap.h> 22#include <asm/iSeries/LparMap.h>
24#include <asm/iSeries/ItVpdAreas.h>
25#include <asm/iSeries/ItIplParmsReal.h>
26#include <asm/iSeries/ItExtVpdPanel.h> 23#include <asm/iSeries/ItExtVpdPanel.h>
27#include <asm/iSeries/ItLpQueue.h> 24#include <asm/iSeries/ItLpQueue.h>
28#include <asm/iSeries/IoHriProcessorVpd.h>
29#include <asm/iSeries/ItSpCommArea.h>
30 25
26#include "vpd_areas.h"
27#include "spcomm_area.h"
28#include "ipl_parms.h"
29#include "processor_vpd.h"
30#include "release_data.h"
31 31
32/* The HvReleaseData is the root of the information shared between 32/* The HvReleaseData is the root of the information shared between
33 * the hypervisor and Linux. 33 * the hypervisor and Linux.
34 */ 34 */
35struct HvReleaseData hvReleaseData = { 35struct HvReleaseData hvReleaseData = {
36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */ 36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
@@ -79,7 +79,7 @@ extern void trap_0e_iSeries(void);
79extern void performance_monitor_iSeries(void); 79extern void performance_monitor_iSeries(void);
80extern void data_access_slb_iSeries(void); 80extern void data_access_slb_iSeries(void);
81extern void instruction_access_slb_iSeries(void); 81extern void instruction_access_slb_iSeries(void);
82 82
83struct ItLpNaca itLpNaca = { 83struct ItLpNaca itLpNaca = {
84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */ 84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
85 .xSize = 0x0400, /* size of ItLpNaca */ 85 .xSize = 0x0400, /* size of ItLpNaca */
@@ -106,7 +106,7 @@ struct ItLpNaca itLpNaca = {
106 .xLoadAreaChunks = 0, /* chunks for load area */ 106 .xLoadAreaChunks = 0, /* chunks for load area */
107 .xPaseSysCallCRMask = 0, /* PASE mask */ 107 .xPaseSysCallCRMask = 0, /* PASE mask */
108 .xSlicSegmentTablePtr = 0, /* seg table */ 108 .xSlicSegmentTablePtr = 0, /* seg table */
109 .xOldLpQueue = { 0 }, /* Old LP Queue */ 109 .xOldLpQueue = { 0 }, /* Old LP Queue */
110 .xInterruptHdlr = { 110 .xInterruptHdlr = {
111 (u64)system_reset_iSeries, /* 0x100 System Reset */ 111 (u64)system_reset_iSeries, /* 0x100 System Reset */
112 (u64)machine_check_iSeries, /* 0x200 Machine Check */ 112 (u64)machine_check_iSeries, /* 0x200 Machine Check */
@@ -134,7 +134,7 @@ struct ItLpNaca itLpNaca = {
134EXPORT_SYMBOL(itLpNaca); 134EXPORT_SYMBOL(itLpNaca);
135 135
136/* May be filled in by the hypervisor so cannot end up in the BSS */ 136/* May be filled in by the hypervisor so cannot end up in the BSS */
137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); 137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
138 138
139/* May be filled in by the hypervisor so cannot end up in the BSS */ 139/* May be filled in by the hypervisor so cannot end up in the BSS */
140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data"))); 140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
@@ -151,7 +151,7 @@ struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
151 .xPVR = 0x3600 151 .xPVR = 0x3600
152 } 152 }
153}; 153};
154 154
155/* Space for Main Store Vpd 27,200 bytes */ 155/* Space for Main Store Vpd 27,200 bytes */
156/* May be filled in by the hypervisor so cannot end up in the BSS */ 156/* May be filled in by the hypervisor so cannot end up in the BSS */
157u64 xMsVpd[3400] __attribute__((__section__(".data"))); 157u64 xMsVpd[3400] __attribute__((__section__(".data")));
@@ -197,7 +197,7 @@ struct ItVpdAreas itVpdAreas = {
197 26992, /* 7 length of MS VPD */ 197 26992, /* 7 length of MS VPD */
198 0, /* 8 */ 198 0, /* 8 */
199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */ 199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
200 0, /* 10 */ 200 0, /* 10 */
201 256, /* 11 length of Recovery Log Buf */ 201 256, /* 11 length of Recovery Log Buf */
202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */ 202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
203 0,0,0, /* 13 - 15 */ 203 0,0,0, /* 13 - 15 */
@@ -207,7 +207,7 @@ struct ItVpdAreas itVpdAreas = {
207 0,0 /* 24 - 25 */ 207 0,0 /* 24 - 25 */
208 }, 208 },
209 .xSlicVpdAdrs = { /* VPD addresses */ 209 .xSlicVpdAdrs = { /* VPD addresses */
210 0,0,0, /* 0 - 2 */ 210 0,0,0, /* 0 - 2 */
211 &xItExtVpdPanel, /* 3 Extended VPD */ 211 &xItExtVpdPanel, /* 3 Extended VPD */
212 &paca[0], /* 4 first Paca */ 212 &paca[0], /* 4 first Paca */
213 0, /* 5 */ 213 0, /* 5 */
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/powerpc/platforms/iseries/lpevents.c
index 4231861288a3..54c7753dbe05 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * ItLpQueue.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation 2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
@@ -14,11 +13,14 @@
14#include <linux/bootmem.h> 13#include <linux/bootmem.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/module.h>
17
17#include <asm/system.h> 18#include <asm/system.h>
18#include <asm/paca.h> 19#include <asm/paca.h>
19#include <asm/iSeries/ItLpQueue.h> 20#include <asm/iSeries/ItLpQueue.h>
20#include <asm/iSeries/HvLpEvent.h> 21#include <asm/iSeries/HvLpEvent.h>
21#include <asm/iSeries/HvCallEvent.h> 22#include <asm/iSeries/HvCallEvent.h>
23#include <asm/iSeries/ItLpNaca.h>
22 24
23/* 25/*
24 * The LpQueue is used to pass event data from the hypervisor to 26 * The LpQueue is used to pass event data from the hypervisor to
@@ -43,7 +45,8 @@ static char *event_types[HvLpEvent_Type_NumTypes] = {
43}; 45};
44 46
45/* Array of LpEvent handler functions */ 47/* Array of LpEvent handler functions */
46extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; 48static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
49static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
47 50
48static struct HvLpEvent * get_next_hvlpevent(void) 51static struct HvLpEvent * get_next_hvlpevent(void)
49{ 52{
@@ -181,11 +184,7 @@ void setup_hvlpevent_queue(void)
181{ 184{
182 void *eventStack; 185 void *eventStack;
183 186
184 /* 187 /* Allocate a page for the Event Stack. */
185 * Allocate a page for the Event Stack. The Hypervisor needs the
186 * absolute real address, so we subtract out the KERNELBASE and add
187 * in the absolute real address of the kernel load area.
188 */
189 eventStack = alloc_bootmem_pages(LpEventStackSize); 188 eventStack = alloc_bootmem_pages(LpEventStackSize);
190 memset(eventStack, 0, LpEventStackSize); 189 memset(eventStack, 0, LpEventStackSize);
191 190
@@ -199,6 +198,70 @@ void setup_hvlpevent_queue(void)
199 hvlpevent_queue.xIndex = 0; 198 hvlpevent_queue.xIndex = 0;
200} 199}
201 200
201/* Register a handler for an LpEvent type */
202int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
203{
204 if (eventType < HvLpEvent_Type_NumTypes) {
205 lpEventHandler[eventType] = handler;
206 return 0;
207 }
208 return 1;
209}
210EXPORT_SYMBOL(HvLpEvent_registerHandler);
211
212int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
213{
214 might_sleep();
215
216 if (eventType < HvLpEvent_Type_NumTypes) {
217 if (!lpEventHandlerPaths[eventType]) {
218 lpEventHandler[eventType] = NULL;
219 /*
220 * We now sleep until all other CPUs have scheduled.
221 * This ensures that the deletion is seen by all
222 * other CPUs, and that the deleted handler isn't
223 * still running on another CPU when we return.
224 */
225 synchronize_rcu();
226 return 0;
227 }
228 }
229 return 1;
230}
231EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
232
233/*
234 * lpIndex is the partition index of the target partition.
235 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
236 * indicates to use our partition index - for the other types.
237 */
238int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
239{
240 if ((eventType < HvLpEvent_Type_NumTypes) &&
241 lpEventHandler[eventType]) {
242 if (lpIndex == 0)
243 lpIndex = itLpNaca.xLpIndex;
244 HvCallEvent_openLpEventPath(lpIndex, eventType);
245 ++lpEventHandlerPaths[eventType];
246 return 0;
247 }
248 return 1;
249}
250
251int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
252{
253 if ((eventType < HvLpEvent_Type_NumTypes) &&
254 lpEventHandler[eventType] &&
255 lpEventHandlerPaths[eventType]) {
256 if (lpIndex == 0)
257 lpIndex = itLpNaca.xLpIndex;
258 HvCallEvent_closeLpEventPath(lpIndex, eventType);
259 --lpEventHandlerPaths[eventType];
260 return 0;
261 }
262 return 1;
263}
264
202static int proc_lpevents_show(struct seq_file *m, void *v) 265static int proc_lpevents_show(struct seq_file *m, void *v)
203{ 266{
204 int cpu, i; 267 int cpu, i;
diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h
new file mode 100644
index 000000000000..74f6889f834f
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/main_store.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_MAIN_STORE_H
20#define _ISERIES_MAIN_STORE_H
21
22/* Main Store Vpd for Condor,iStar,sStar */
23struct IoHriMainStoreSegment4 {
24 u8 msArea0Exists:1;
25 u8 msArea1Exists:1;
26 u8 msArea2Exists:1;
27 u8 msArea3Exists:1;
28 u8 reserved1:4;
29 u8 reserved2;
30
31 u8 msArea0Functional:1;
32 u8 msArea1Functional:1;
33 u8 msArea2Functional:1;
34 u8 msArea3Functional:1;
35 u8 reserved3:4;
36 u8 reserved4;
37
38 u32 totalMainStore;
39
40 u64 msArea0Ptr;
41 u64 msArea1Ptr;
42 u64 msArea2Ptr;
43 u64 msArea3Ptr;
44
45 u32 cardProductionLevel;
46
47 u32 msAdrHole;
48
49 u8 msArea0HasRiserVpd:1;
50 u8 msArea1HasRiserVpd:1;
51 u8 msArea2HasRiserVpd:1;
52 u8 msArea3HasRiserVpd:1;
53 u8 reserved5:4;
54 u8 reserved6;
55 u16 reserved7;
56
57 u8 reserved8[28];
58
59 u64 nonInterleavedBlocksStartAdr;
60 u64 nonInterleavedBlocksEndAdr;
61};
62
63/* Main Store VPD for Power4 */
64struct IoHriMainStoreChipInfo1 {
65 u32 chipMfgID __attribute((packed));
66 char chipECLevel[4] __attribute((packed));
67};
68
69struct IoHriMainStoreVpdIdData {
70 char typeNumber[4];
71 char modelNumber[4];
72 char partNumber[12];
73 char serialNumber[12];
74};
75
76struct IoHriMainStoreVpdFruData {
77 char fruLabel[8] __attribute((packed));
78 u8 numberOfSlots __attribute((packed));
79 u8 pluggingType __attribute((packed));
80 u16 slotMapIndex __attribute((packed));
81};
82
83struct IoHriMainStoreAdrRangeBlock {
84 void *blockStart __attribute((packed));
85 void *blockEnd __attribute((packed));
86 u32 blockProcChipId __attribute((packed));
87};
88
89#define MaxAreaAdrRangeBlocks 4
90
91struct IoHriMainStoreArea4 {
92 u32 msVpdFormat __attribute((packed));
93 u8 containedVpdType __attribute((packed));
94 u8 reserved1 __attribute((packed));
95 u16 reserved2 __attribute((packed));
96
97 u64 msExists __attribute((packed));
98 u64 msFunctional __attribute((packed));
99
100 u32 memorySize __attribute((packed));
101 u32 procNodeId __attribute((packed));
102
103 u32 numAdrRangeBlocks __attribute((packed));
104 struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks] __attribute((packed));
105
106 struct IoHriMainStoreChipInfo1 chipInfo0 __attribute((packed));
107 struct IoHriMainStoreChipInfo1 chipInfo1 __attribute((packed));
108 struct IoHriMainStoreChipInfo1 chipInfo2 __attribute((packed));
109 struct IoHriMainStoreChipInfo1 chipInfo3 __attribute((packed));
110 struct IoHriMainStoreChipInfo1 chipInfo4 __attribute((packed));
111 struct IoHriMainStoreChipInfo1 chipInfo5 __attribute((packed));
112 struct IoHriMainStoreChipInfo1 chipInfo6 __attribute((packed));
113 struct IoHriMainStoreChipInfo1 chipInfo7 __attribute((packed));
114
115 void *msRamAreaArray __attribute((packed));
116 u32 msRamAreaArrayNumEntries __attribute((packed));
117 u32 msRamAreaArrayEntrySize __attribute((packed));
118
119 u32 numaDimmExists __attribute((packed));
120 u32 numaDimmFunctional __attribute((packed));
121 void *numaDimmArray __attribute((packed));
122 u32 numaDimmArrayNumEntries __attribute((packed));
123 u32 numaDimmArrayEntrySize __attribute((packed));
124
125 struct IoHriMainStoreVpdIdData idData __attribute((packed));
126
127 u64 powerData __attribute((packed));
128 u64 cardAssemblyPartNum __attribute((packed));
129 u64 chipSerialNum __attribute((packed));
130
131 u64 reserved3 __attribute((packed));
132 char reserved4[16] __attribute((packed));
133
134 struct IoHriMainStoreVpdFruData fruData __attribute((packed));
135
136 u8 vpdPortNum __attribute((packed));
137 u8 reserved5 __attribute((packed));
138 u8 frameId __attribute((packed));
139 u8 rackUnit __attribute((packed));
140 char asciiKeywordVpd[256] __attribute((packed));
141 u32 reserved6 __attribute((packed));
142};
143
144
145struct IoHriMainStoreSegment5 {
146 u16 reserved1;
147 u8 reserved2;
148 u8 msVpdFormat;
149
150 u32 totalMainStore;
151 u64 maxConfiguredMsAdr;
152
153 struct IoHriMainStoreArea4 *msAreaArray;
154 u32 msAreaArrayNumEntries;
155 u32 msAreaArrayEntrySize;
156
157 u32 msAreaExists;
158 u32 msAreaFunctional;
159
160 u64 reserved3;
161};
162
163extern u64 xMsVpd[];
164
165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/ppc64/kernel/mf.c b/arch/powerpc/platforms/iseries/mf.c
index ef4a338ebd01..e5de31aa0015 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1,29 +1,28 @@
1/* 1/*
2 * mf.c 2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation 3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation 4 *
5 * 5 * This modules exists as an interface between a Linux secondary partition
6 * This modules exists as an interface between a Linux secondary partition 6 * running on an iSeries and the primary partition's Virtual Service
7 * running on an iSeries and the primary partition's Virtual Service 7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * Processor (VSP) object. The VSP has final authority over powering on/off 8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * all partitions in the iSeries. It also provides miscellaneous low-level 9 * machine facility type operations.
10 * machine facility type operations. 10 *
11 * 11 *
12 * 12 * This program is free software; you can redistribute it and/or modify
13 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by
14 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or
15 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version.
16 * (at your option) any later version. 16 *
17 * 17 * This program is distributed in the hope that it will be useful,
18 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details.
21 * GNU General Public License for more details. 21 *
22 * 22 * You should have received a copy of the GNU General Public License
23 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software
24 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 */
26 */
27 26
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/errno.h> 28#include <linux/errno.h>
@@ -33,15 +32,21 @@
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
35#include <linux/bcd.h> 34#include <linux/bcd.h>
35#include <linux/rtc.h>
36 36
37#include <asm/time.h> 37#include <asm/time.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39#include <asm/paca.h> 39#include <asm/paca.h>
40#include <asm/abs_addr.h>
40#include <asm/iSeries/vio.h> 41#include <asm/iSeries/vio.h>
41#include <asm/iSeries/mf.h> 42#include <asm/iSeries/mf.h>
42#include <asm/iSeries/HvLpConfig.h> 43#include <asm/iSeries/HvLpConfig.h>
43#include <asm/iSeries/ItLpQueue.h> 44#include <asm/iSeries/ItLpQueue.h>
44 45
46#include "setup.h"
47
48extern int piranha_simulator;
49
45/* 50/*
46 * This is the structure layout for the Machine Facilites LPAR event 51 * This is the structure layout for the Machine Facilites LPAR event
47 * flows. 52 * flows.
@@ -1061,10 +1066,10 @@ static void mf_getSrcHistory(char *buffer, int size)
1061 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); 1066 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062 ev->event.data.vsp_cmd.result_code = 0xFF; 1067 ev->event.data.vsp_cmd.result_code = 0xFF;
1063 ev->event.data.vsp_cmd.reserved = 0; 1068 ev->event.data.vsp_cmd.reserved = 0;
1064 ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]); 1069 ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
1065 ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]); 1070 ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
1066 ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]); 1071 ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
1067 ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]); 1072 ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
1068 mb(); 1073 mb();
1069 if (signal_event(ev) != 0) 1074 if (signal_event(ev) != 0)
1070 return; 1075 return;
@@ -1279,3 +1284,38 @@ static int __init mf_proc_init(void)
1279__initcall(mf_proc_init); 1284__initcall(mf_proc_init);
1280 1285
1281#endif /* CONFIG_PROC_FS */ 1286#endif /* CONFIG_PROC_FS */
1287
1288/*
1289 * Get the RTC from the virtual service processor
1290 * This requires flowing LpEvents to the primary partition
1291 */
1292void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
1293{
1294 if (piranha_simulator)
1295 return;
1296
1297 mf_get_rtc(rtc_tm);
1298 rtc_tm->tm_mon--;
1299}
1300
1301/*
1302 * Set the RTC in the virtual service processor
1303 * This requires flowing LpEvents to the primary partition
1304 */
1305int iSeries_set_rtc_time(struct rtc_time *tm)
1306{
1307 mf_set_rtc(tm);
1308 return 0;
1309}
1310
1311unsigned long iSeries_get_boot_time(void)
1312{
1313 struct rtc_time tm;
1314
1315 if (piranha_simulator)
1316 return 0;
1317
1318 mf_get_boot_rtc(&tm);
1319 return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
1320 tm.tm_hour, tm.tm_min, tm.tm_sec);
1321}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644
index 000000000000..09f14522e176
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -0,0 +1,55 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18
19 .text
20
21/* unsigned long local_save_flags(void) */
22_GLOBAL(local_get_flags)
23 lbz r3,PACAPROCENABLED(r13)
24 blr
25
26/* unsigned long local_irq_disable(void) */
27_GLOBAL(local_irq_disable)
28 lbz r3,PACAPROCENABLED(r13)
29 li r4,0
30 stb r4,PACAPROCENABLED(r13)
31 blr /* Done */
32
33/* void local_irq_restore(unsigned long flags) */
34_GLOBAL(local_irq_restore)
35 lbz r5,PACAPROCENABLED(r13)
36 /* Check if things are setup the way we want _already_. */
37 cmpw 0,r3,r5
38 beqlr
39 /* are we enabling interrupts? */
40 cmpdi 0,r3,0
41 stb r3,PACAPROCENABLED(r13)
42 beqlr
43 /* Check pending interrupts */
44 /* A decrementer, IPI or PMC interrupt may have occurred
45 * while we were in the hypervisor (which enables) */
46 ld r4,PACALPPACA+LPPACAANYINT(r13)
47 cmpdi r4,0
48 beqlr
49
50 /*
51 * Handle pending interrupts in interrupt context
52 */
53 li r0,0x5555
54 sc
55 blr
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/powerpc/platforms/iseries/pci.c
index fbc273c32bcc..959e59fd9c11 100644
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -1,28 +1,26 @@
1/* 1/*
2 * iSeries_pci.c
3 *
4 * Copyright (C) 2001 Allan Trautman, IBM Corporation 2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
5 * 3 *
6 * iSeries specific routines for PCI. 4 * iSeries specific routines for PCI.
7 * 5 *
8 * Based on code from pci.c and iSeries_pci.c 32bit 6 * Based on code from pci.c and iSeries_pci.c 32bit
9 * 7 *
10 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 11 * (at your option) any later version.
14 * 12 *
15 * This program is distributed in the hope that it will be useful, 13 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 16 * GNU General Public License for more details.
19 * 17 *
20 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 21 */
24#include <linux/kernel.h> 22#include <linux/kernel.h>
25#include <linux/list.h> 23#include <linux/list.h>
26#include <linux/string.h> 24#include <linux/string.h>
27#include <linux/init.h> 25#include <linux/init.h>
28#include <linux/module.h> 26#include <linux/module.h>
@@ -36,21 +34,23 @@
36#include <asm/pci-bridge.h> 34#include <asm/pci-bridge.h>
37#include <asm/ppcdebug.h> 35#include <asm/ppcdebug.h>
38#include <asm/iommu.h> 36#include <asm/iommu.h>
37#include <asm/abs_addr.h>
39 38
40#include <asm/iSeries/HvCallPci.h>
41#include <asm/iSeries/HvCallXm.h> 39#include <asm/iSeries/HvCallXm.h>
42#include <asm/iSeries/iSeries_irq.h>
43#include <asm/iSeries/iSeries_pci.h>
44#include <asm/iSeries/mf.h> 40#include <asm/iSeries/mf.h>
45 41
42#include <asm/ppc-pci.h>
43
44#include "irq.h"
46#include "pci.h" 45#include "pci.h"
46#include "call_pci.h"
47 47
48extern unsigned long io_page_mask; 48extern unsigned long io_page_mask;
49 49
50/* 50/*
51 * Forward declares of prototypes. 51 * Forward declares of prototypes.
52 */ 52 */
53static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn); 53static struct device_node *find_Device_Node(int bus, int devfn);
54static void scan_PHB_slots(struct pci_controller *Phb); 54static void scan_PHB_slots(struct pci_controller *Phb);
55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel); 55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info); 56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
@@ -68,7 +68,7 @@ static long Pci_Cfg_Write_Count;
68#endif 68#endif
69static long Pci_Error_Count; 69static long Pci_Error_Count;
70 70
71static int Pci_Retry_Max = 3; /* Only retry 3 times */ 71static int Pci_Retry_Max = 3; /* Only retry 3 times */
72static int Pci_Error_Flag = 1; /* Set Retry Error on. */ 72static int Pci_Error_Flag = 1; /* Set Retry Error on. */
73 73
74static struct pci_ops iSeries_pci_ops; 74static struct pci_ops iSeries_pci_ops;
@@ -87,7 +87,7 @@ static long current_iomm_table_entry;
87/* 87/*
88 * Lookup Tables. 88 * Lookup Tables.
89 */ 89 */
90static struct iSeries_Device_Node **iomm_table; 90static struct device_node **iomm_table;
91static u8 *iobar_table; 91static u8 *iobar_table;
92 92
93/* 93/*
@@ -179,7 +179,7 @@ static void allocate_device_bars(struct pci_dev *dev)
179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) { 179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
180 bar_res = &dev->resource[bar_num]; 180 bar_res = &dev->resource[bar_num];
181 iomm_table_allocate_entry(dev, bar_num); 181 iomm_table_allocate_entry(dev, bar_num);
182 } 182 }
183} 183}
184 184
185/* 185/*
@@ -201,29 +201,31 @@ static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
201/* 201/*
202 * build_device_node(u16 Bus, int SubBus, u8 DevFn) 202 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
203 */ 203 */
204static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus, 204static struct device_node *build_device_node(HvBusNumber Bus,
205 HvSubBusNumber SubBus, int AgentId, int Function) 205 HvSubBusNumber SubBus, int AgentId, int Function)
206{ 206{
207 struct iSeries_Device_Node *node; 207 struct device_node *node;
208 struct pci_dn *pdn;
208 209
209 PPCDBG(PPCDBG_BUSWALK, 210 PPCDBG(PPCDBG_BUSWALK,
210 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n", 211 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
211 Bus, SubBus, AgentId, Function); 212 Bus, SubBus, AgentId, Function);
212 213
213 node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL); 214 node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
214 if (node == NULL) 215 if (node == NULL)
215 return NULL; 216 return NULL;
216 217 memset(node, 0, sizeof(struct device_node));
217 memset(node, 0, sizeof(struct iSeries_Device_Node)); 218 pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
218 list_add_tail(&node->Device_List, &iSeries_Global_Device_List); 219 if (pdn == NULL) {
219#if 0 220 kfree(node);
220 node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32); 221 return NULL;
221#endif 222 }
222 node->DsaAddr.DsaAddr = 0; 223 node->data = pdn;
223 node->DsaAddr.Dsa.busNumber = Bus; 224 pdn->node = node;
224 node->DsaAddr.Dsa.subBusNumber = SubBus; 225 list_add_tail(&pdn->Device_List, &iSeries_Global_Device_List);
225 node->DsaAddr.Dsa.deviceId = 0x10; 226 pdn->busno = Bus;
226 node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); 227 pdn->bussubno = SubBus;
228 pdn->devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
227 return node; 229 return node;
228} 230}
229 231
@@ -278,28 +280,28 @@ unsigned long __init find_and_init_phbs(void)
278 280
279/* 281/*
280 * iSeries_pcibios_init 282 * iSeries_pcibios_init
281 * 283 *
282 * Chance to initialize and structures or variable before PCI Bus walk. 284 * Chance to initialize and structures or variable before PCI Bus walk.
283 */ 285 */
284void iSeries_pcibios_init(void) 286void iSeries_pcibios_init(void)
285{ 287{
286 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n"); 288 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
287 iomm_table_initialize(); 289 iomm_table_initialize();
288 find_and_init_phbs(); 290 find_and_init_phbs();
289 io_page_mask = -1; 291 io_page_mask = -1;
290 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n"); 292 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
291} 293}
292 294
293/* 295/*
294 * iSeries_pci_final_fixup(void) 296 * iSeries_pci_final_fixup(void)
295 */ 297 */
296void __init iSeries_pci_final_fixup(void) 298void __init iSeries_pci_final_fixup(void)
297{ 299{
298 struct pci_dev *pdev = NULL; 300 struct pci_dev *pdev = NULL;
299 struct iSeries_Device_Node *node; 301 struct device_node *node;
300 int DeviceCount = 0; 302 int DeviceCount = 0;
301 303
302 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); 304 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
303 305
304 /* Fix up at the device node and pci_dev relationship */ 306 /* Fix up at the device node and pci_dev relationship */
305 mf_display_src(0xC9000100); 307 mf_display_src(0xC9000100);
@@ -313,7 +315,7 @@ void __init iSeries_pci_final_fixup(void)
313 if (node != NULL) { 315 if (node != NULL) {
314 ++DeviceCount; 316 ++DeviceCount;
315 pdev->sysdata = (void *)node; 317 pdev->sysdata = (void *)node;
316 node->PciDev = pdev; 318 PCI_DN(node)->pcidev = pdev;
317 PPCDBG(PPCDBG_BUSWALK, 319 PPCDBG(PPCDBG_BUSWALK,
318 "pdev 0x%p <==> DevNode 0x%p\n", 320 "pdev 0x%p <==> DevNode 0x%p\n",
319 pdev, node); 321 pdev, node);
@@ -323,7 +325,7 @@ void __init iSeries_pci_final_fixup(void)
323 } else 325 } else
324 printk("PCI: Device Tree not found for 0x%016lX\n", 326 printk("PCI: Device Tree not found for 0x%016lX\n",
325 (unsigned long)pdev); 327 (unsigned long)pdev);
326 pdev->irq = node->Irq; 328 pdev->irq = PCI_DN(node)->Irq;
327 } 329 }
328 iSeries_activate_IRQs(); 330 iSeries_activate_IRQs();
329 mf_display_src(0xC9000200); 331 mf_display_src(0xC9000200);
@@ -332,24 +334,24 @@ void __init iSeries_pci_final_fixup(void)
332void pcibios_fixup_bus(struct pci_bus *PciBus) 334void pcibios_fixup_bus(struct pci_bus *PciBus)
333{ 335{
334 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n", 336 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
335 PciBus->number); 337 PciBus->number);
336} 338}
337 339
338void pcibios_fixup_resources(struct pci_dev *pdev) 340void pcibios_fixup_resources(struct pci_dev *pdev)
339{ 341{
340 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev); 342 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
341} 343}
342 344
343/* 345/*
344 * Loop through each node function to find usable EADs bridges. 346 * Loop through each node function to find usable EADs bridges.
345 */ 347 */
346static void scan_PHB_slots(struct pci_controller *Phb) 348static void scan_PHB_slots(struct pci_controller *Phb)
347{ 349{
348 struct HvCallPci_DeviceInfo *DevInfo; 350 struct HvCallPci_DeviceInfo *DevInfo;
349 HvBusNumber bus = Phb->local_number; /* System Bus */ 351 HvBusNumber bus = Phb->local_number; /* System Bus */
350 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */ 352 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
351 int HvRc = 0; 353 int HvRc = 0;
352 int IdSel; 354 int IdSel;
353 const int MaxAgents = 8; 355 const int MaxAgents = 8;
354 356
355 DevInfo = (struct HvCallPci_DeviceInfo*) 357 DevInfo = (struct HvCallPci_DeviceInfo*)
@@ -358,11 +360,11 @@ static void scan_PHB_slots(struct pci_controller *Phb)
358 return; 360 return;
359 361
360 /* 362 /*
361 * Probe for EADs Bridges 363 * Probe for EADs Bridges
362 */ 364 */
363 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) { 365 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
364 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel, 366 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
365 ISERIES_HV_ADDR(DevInfo), 367 iseries_hv_addr(DevInfo),
366 sizeof(struct HvCallPci_DeviceInfo)); 368 sizeof(struct HvCallPci_DeviceInfo));
367 if (HvRc == 0) { 369 if (HvRc == 0) {
368 if (DevInfo->deviceType == HvCallPci_NodeDevice) 370 if (DevInfo->deviceType == HvCallPci_NodeDevice)
@@ -393,19 +395,19 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
393 395
394 /* Note: hvSubBus and irq is always be 0 at this level! */ 396 /* Note: hvSubBus and irq is always be 0 at this level! */
395 for (Function = 0; Function < 8; ++Function) { 397 for (Function = 0; Function < 8; ++Function) {
396 AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 398 AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
397 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0); 399 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
398 if (HvRc == 0) { 400 if (HvRc == 0) {
399 printk("found device at bus %d idsel %d func %d (AgentId %x)\n", 401 printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
400 bus, IdSel, Function, AgentId); 402 bus, IdSel, Function, AgentId);
401 /* Connect EADs: 0x18.00.12 = 0x00 */ 403 /* Connect EADs: 0x18.00.12 = 0x00 */
402 PPCDBG(PPCDBG_BUSWALK, 404 PPCDBG(PPCDBG_BUSWALK,
403 "PCI:Connect EADs: 0x%02X.%02X.%02X\n", 405 "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
404 bus, SubBus, AgentId); 406 bus, SubBus, AgentId);
405 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId, 407 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
406 ISERIES_HV_ADDR(BridgeInfo), 408 iseries_hv_addr(BridgeInfo),
407 sizeof(struct HvCallPci_BridgeInfo)); 409 sizeof(struct HvCallPci_BridgeInfo));
408 if (HvRc == 0) { 410 if (HvRc == 0) {
409 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n", 411 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
410 BridgeInfo->busUnitInfo.deviceType, 412 BridgeInfo->busUnitInfo.deviceType,
411 BridgeInfo->subBusNumber, 413 BridgeInfo->subBusNumber,
@@ -428,7 +430,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
428 printk("PCI: Invalid Bridge Configuration(0x%02X)", 430 printk("PCI: Invalid Bridge Configuration(0x%02X)",
429 BridgeInfo->busUnitInfo.deviceType); 431 BridgeInfo->busUnitInfo.deviceType);
430 } 432 }
431 } else if (HvRc != 0x000B) 433 } else if (HvRc != 0x000B)
432 pci_Log_Error("EADs Connect", 434 pci_Log_Error("EADs Connect",
433 bus, SubBus, AgentId, HvRc); 435 bus, SubBus, AgentId, HvRc);
434 } 436 }
@@ -441,7 +443,7 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
441static int scan_bridge_slot(HvBusNumber Bus, 443static int scan_bridge_slot(HvBusNumber Bus,
442 struct HvCallPci_BridgeInfo *BridgeInfo) 444 struct HvCallPci_BridgeInfo *BridgeInfo)
443{ 445{
444 struct iSeries_Device_Node *node; 446 struct device_node *node;
445 HvSubBusNumber SubBus = BridgeInfo->subBusNumber; 447 HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
446 u16 VendorId = 0; 448 u16 VendorId = 0;
447 int HvRc = 0; 449 int HvRc = 0;
@@ -451,16 +453,16 @@ static int scan_bridge_slot(HvBusNumber Bus,
451 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function); 453 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
452 454
453 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */ 455 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
454 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel); 456 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
455 PPCDBG(PPCDBG_BUSWALK, 457 PPCDBG(PPCDBG_BUSWALK,
456 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n", 458 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
457 Bus, 0, EADsIdSel, Irq); 459 Bus, 0, EADsIdSel, Irq);
458 460
459 /* 461 /*
460 * Connect all functions of any device found. 462 * Connect all functions of any device found.
461 */ 463 */
462 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) { 464 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
463 for (Function = 0; Function < 8; ++Function) { 465 for (Function = 0; Function < 8; ++Function) {
464 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function); 466 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
465 HvRc = HvCallXm_connectBusUnit(Bus, SubBus, 467 HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
466 AgentId, Irq); 468 AgentId, Irq);
@@ -484,15 +486,15 @@ static int scan_bridge_slot(HvBusNumber Bus,
484 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n", 486 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
485 Bus, SubBus, AgentId, VendorId, Irq); 487 Bus, SubBus, AgentId, VendorId, Irq);
486 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId, 488 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
487 PCI_INTERRUPT_LINE, Irq); 489 PCI_INTERRUPT_LINE, Irq);
488 if (HvRc != 0) 490 if (HvRc != 0)
489 pci_Log_Error("PciCfgStore Irq Failed!", 491 pci_Log_Error("PciCfgStore Irq Failed!",
490 Bus, SubBus, AgentId, HvRc); 492 Bus, SubBus, AgentId, HvRc);
491 493
492 ++DeviceCount; 494 ++DeviceCount;
493 node = build_device_node(Bus, SubBus, EADsIdSel, Function); 495 node = build_device_node(Bus, SubBus, EADsIdSel, Function);
494 node->Irq = Irq; 496 PCI_DN(node)->Irq = Irq;
495 node->LogicalSlot = BridgeInfo->logicalSlotNumber; 497 PCI_DN(node)->LogicalSlot = BridgeInfo->logicalSlotNumber;
496 498
497 } /* for (Function = 0; Function < 8; ++Function) */ 499 } /* for (Function = 0; Function < 8; ++Function) */
498 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */ 500 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
@@ -542,16 +544,13 @@ EXPORT_SYMBOL(iSeries_memcpy_fromio);
542/* 544/*
543 * Look down the chain to find the matching Device Device 545 * Look down the chain to find the matching Device Device
544 */ 546 */
545static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn) 547static struct device_node *find_Device_Node(int bus, int devfn)
546{ 548{
547 struct list_head *pos; 549 struct pci_dn *pdn;
548 550
549 list_for_each(pos, &iSeries_Global_Device_List) { 551 list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
550 struct iSeries_Device_Node *node = 552 if ((bus == pdn->busno) && (devfn == pdn->devfn))
551 list_entry(pos, struct iSeries_Device_Node, Device_List); 553 return pdn->node;
552
553 if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
554 return node;
555 } 554 }
556 return NULL; 555 return NULL;
557} 556}
@@ -562,12 +561,12 @@ static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
562 * Sanity Check Node PciDev to passed pci_dev 561 * Sanity Check Node PciDev to passed pci_dev
563 * If none is found, returns a NULL which the client must handle. 562 * If none is found, returns a NULL which the client must handle.
564 */ 563 */
565static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev) 564static struct device_node *get_Device_Node(struct pci_dev *pdev)
566{ 565{
567 struct iSeries_Device_Node *node; 566 struct device_node *node;
568 567
569 node = pdev->sysdata; 568 node = pdev->sysdata;
570 if (node == NULL || node->PciDev != pdev) 569 if (node == NULL || PCI_DN(node)->pcidev != pdev)
571 node = find_Device_Node(pdev->bus->number, pdev->devfn); 570 node = find_Device_Node(pdev->bus->number, pdev->devfn);
572 return node; 571 return node;
573} 572}
@@ -595,7 +594,7 @@ static u64 hv_cfg_write_func[4] = {
595static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn, 594static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
596 int offset, int size, u32 *val) 595 int offset, int size, u32 *val)
597{ 596{
598 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 597 struct device_node *node = find_Device_Node(bus->number, devfn);
599 u64 fn; 598 u64 fn;
600 struct HvCallPci_LoadReturn ret; 599 struct HvCallPci_LoadReturn ret;
601 600
@@ -607,7 +606,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
607 } 606 }
608 607
609 fn = hv_cfg_read_func[(size - 1) & 3]; 608 fn = hv_cfg_read_func[(size - 1) & 3];
610 HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0); 609 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
611 610
612 if (ret.rc != 0) { 611 if (ret.rc != 0) {
613 *val = ~0; 612 *val = ~0;
@@ -625,7 +624,7 @@ static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
625static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn, 624static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
626 int offset, int size, u32 val) 625 int offset, int size, u32 val)
627{ 626{
628 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn); 627 struct device_node *node = find_Device_Node(bus->number, devfn);
629 u64 fn; 628 u64 fn;
630 u64 ret; 629 u64 ret;
631 630
@@ -635,7 +634,7 @@ static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
635 return PCIBIOS_BAD_REGISTER_NUMBER; 634 return PCIBIOS_BAD_REGISTER_NUMBER;
636 635
637 fn = hv_cfg_write_func[(size - 1) & 3]; 636 fn = hv_cfg_write_func[(size - 1) & 3];
638 ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0); 637 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
639 638
640 if (ret != 0) 639 if (ret != 0)
641 return PCIBIOS_DEVICE_NOT_FOUND; 640 return PCIBIOS_DEVICE_NOT_FOUND;
@@ -657,14 +656,16 @@ static struct pci_ops iSeries_pci_ops = {
657 * PCI: Device 23.90 ReadL Retry( 1) 656 * PCI: Device 23.90 ReadL Retry( 1)
658 * PCI: Device 23.90 ReadL Retry Successful(1) 657 * PCI: Device 23.90 ReadL Retry Successful(1)
659 */ 658 */
660static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode, 659static int CheckReturnCode(char *TextHdr, struct device_node *DevNode,
661 int *retry, u64 ret) 660 int *retry, u64 ret)
662{ 661{
663 if (ret != 0) { 662 if (ret != 0) {
663 struct pci_dn *pdn = PCI_DN(DevNode);
664
664 ++Pci_Error_Count; 665 ++Pci_Error_Count;
665 (*retry)++; 666 (*retry)++;
666 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", 667 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
667 TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn, 668 TextHdr, pdn->busno, pdn->devfn,
668 *retry, (int)ret); 669 *retry, (int)ret);
669 /* 670 /*
670 * Bump the retry and check for retry count exceeded. 671 * Bump the retry and check for retry count exceeded.
@@ -687,14 +688,14 @@ static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
687 * Note: Make sure the passed variable end up on the stack to avoid 688 * Note: Make sure the passed variable end up on the stack to avoid
688 * the exposure of being device global. 689 * the exposure of being device global.
689 */ 690 */
690static inline struct iSeries_Device_Node *xlate_iomm_address( 691static inline struct device_node *xlate_iomm_address(
691 const volatile void __iomem *IoAddress, 692 const volatile void __iomem *IoAddress,
692 u64 *dsaptr, u64 *BarOffsetPtr) 693 u64 *dsaptr, u64 *BarOffsetPtr)
693{ 694{
694 unsigned long OrigIoAddr; 695 unsigned long OrigIoAddr;
695 unsigned long BaseIoAddr; 696 unsigned long BaseIoAddr;
696 unsigned long TableIndex; 697 unsigned long TableIndex;
697 struct iSeries_Device_Node *DevNode; 698 struct device_node *DevNode;
698 699
699 OrigIoAddr = (unsigned long __force)IoAddress; 700 OrigIoAddr = (unsigned long __force)IoAddress;
700 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory)) 701 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
@@ -705,7 +706,7 @@ static inline struct iSeries_Device_Node *xlate_iomm_address(
705 706
706 if (DevNode != NULL) { 707 if (DevNode != NULL) {
707 int barnum = iobar_table[TableIndex]; 708 int barnum = iobar_table[TableIndex];
708 *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24); 709 *dsaptr = iseries_ds_addr(DevNode) | (barnum << 24);
709 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE; 710 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
710 } else 711 } else
711 panic("PCI: Invalid PCI IoAddress detected!\n"); 712 panic("PCI: Invalid PCI IoAddress detected!\n");
@@ -727,7 +728,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
727 u64 dsa; 728 u64 dsa;
728 int retry = 0; 729 int retry = 0;
729 struct HvCallPci_LoadReturn ret; 730 struct HvCallPci_LoadReturn ret;
730 struct iSeries_Device_Node *DevNode = 731 struct device_node *DevNode =
731 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 732 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
732 733
733 if (DevNode == NULL) { 734 if (DevNode == NULL) {
@@ -757,7 +758,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
757 u64 dsa; 758 u64 dsa;
758 int retry = 0; 759 int retry = 0;
759 struct HvCallPci_LoadReturn ret; 760 struct HvCallPci_LoadReturn ret;
760 struct iSeries_Device_Node *DevNode = 761 struct device_node *DevNode =
761 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 762 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
762 763
763 if (DevNode == NULL) { 764 if (DevNode == NULL) {
@@ -788,7 +789,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
788 u64 dsa; 789 u64 dsa;
789 int retry = 0; 790 int retry = 0;
790 struct HvCallPci_LoadReturn ret; 791 struct HvCallPci_LoadReturn ret;
791 struct iSeries_Device_Node *DevNode = 792 struct device_node *DevNode =
792 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 793 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
793 794
794 if (DevNode == NULL) { 795 if (DevNode == NULL) {
@@ -826,7 +827,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
826 u64 dsa; 827 u64 dsa;
827 int retry = 0; 828 int retry = 0;
828 u64 rc; 829 u64 rc;
829 struct iSeries_Device_Node *DevNode = 830 struct device_node *DevNode =
830 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 831 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
831 832
832 if (DevNode == NULL) { 833 if (DevNode == NULL) {
@@ -854,7 +855,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
854 u64 dsa; 855 u64 dsa;
855 int retry = 0; 856 int retry = 0;
856 u64 rc; 857 u64 rc;
857 struct iSeries_Device_Node *DevNode = 858 struct device_node *DevNode =
858 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 859 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
859 860
860 if (DevNode == NULL) { 861 if (DevNode == NULL) {
@@ -882,7 +883,7 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
882 u64 dsa; 883 u64 dsa;
883 int retry = 0; 884 int retry = 0;
884 u64 rc; 885 u64 rc;
885 struct iSeries_Device_Node *DevNode = 886 struct device_node *DevNode =
886 xlate_iomm_address(IoAddress, &dsa, &BarOffset); 887 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
887 888
888 if (DevNode == NULL) { 889 if (DevNode == NULL) {
diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
new file mode 100644
index 000000000000..33a8489fde54
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/pci.h
@@ -0,0 +1,63 @@
1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _PLATFORMS_ISERIES_PCI_H
3
4/*
5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 *
7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the:
22 * Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330,
24 * Boston, MA 02111-1307 USA
25 *
26 * Change Activity:
27 * Created Feb 20, 2001
28 * Added device reset, March 22, 2001
29 * Ported to ppc64, May 25, 2001
30 * End Change Activity
31 */
32
33#include <asm/pci-bridge.h>
34
35struct pci_dev; /* For Forward Reference */
36
37/*
38 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
39 * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
40 */
41
42#define ISERIES_PCI_AGENTID(idsel, func) \
43 (((idsel & 0x0F) << 4) | (func & 0x07))
44#define ISERIES_ENCODE_DEVICE(agentid) \
45 ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))
46
47#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
48#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
49
50/*
51 * Generate a Direct Select Address for the Hypervisor
52 */
53static inline u64 iseries_ds_addr(struct device_node *node)
54{
55 struct pci_dn *pdn = PCI_DN(node);
56
57 return ((u64)pdn->busno << 48) + ((u64)pdn->bussubno << 40)
58 + ((u64)0x10 << 32);
59}
60
61extern void iSeries_Device_Information(struct pci_dev*, int);
62
63#endif /* _PLATFORMS_ISERIES_PCI_H */
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/powerpc/platforms/iseries/proc.c
index 0fe3116eba29..6f1929cac66b 100644
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * iSeries_proc.c
3 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation 2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
5 * 4 *
@@ -27,8 +26,9 @@
27#include <asm/lppaca.h> 26#include <asm/lppaca.h>
28#include <asm/iSeries/ItLpQueue.h> 27#include <asm/iSeries/ItLpQueue.h>
29#include <asm/iSeries/HvCallXm.h> 28#include <asm/iSeries/HvCallXm.h>
30#include <asm/iSeries/IoHriMainStore.h> 29
31#include <asm/iSeries/IoHriProcessorVpd.h> 30#include "processor_vpd.h"
31#include "main_store.h"
32 32
33static int __init iseries_proc_create(void) 33static int __init iseries_proc_create(void)
34{ 34{
@@ -68,12 +68,15 @@ static int proc_titantod_show(struct seq_file *m, void *v)
68 unsigned long tb_ticks = (tb0 - startTb); 68 unsigned long tb_ticks = (tb0 - startTb);
69 unsigned long titan_jiffies = titan_usec / (1000000/HZ); 69 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ); 70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
71 unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec; 71 unsigned long titan_jiff_rem_usec =
72 titan_usec - titan_jiff_usec;
72 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy; 73 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
73 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy; 74 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
74 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks; 75 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
75 unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec; 76 unsigned long tb_jiff_rem_usec =
76 unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec; 77 tb_jiff_rem_ticks / tb_ticks_per_usec;
78 unsigned long new_tb_ticks_per_jiffy =
79 (tb_ticks * (1000000/HZ))/titan_usec;
77 80
78 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec); 81 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
79 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks); 82 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
diff --git a/arch/powerpc/platforms/iseries/processor_vpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
new file mode 100644
index 000000000000..7ac5d0d0dbfa
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/processor_vpd.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_PROCESSOR_VPD_H
19#define _ISERIES_PROCESSOR_VPD_H
20
21#include <asm/types.h>
22
23/*
24 * This struct maps Processor Vpd that is DMAd to SLIC by CSP
25 */
26struct IoHriProcessorVpd {
27 u8 xFormat; // VPD format indicator x00-x00
28 u8 xProcStatus:8; // Processor State x01-x01
29 u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
30 u8 xSrcType:1; // Src Type x03-x03
31 u8 xSrcSoft:1; // Src stay soft ...
32 u8 xSrcParable:1; // Src parable ...
33 u8 xRsvd1:5; // Reserved ...
34 u16 xHvPhysicalProcIndex; // Hypervisor physical proc index04-x05
35 u16 xRsvd2; // Reserved x06-x07
36 u32 xHwNodeId; // Hardware node id x08-x0B
37 u32 xHwProcId; // Hardware processor id x0C-x0F
38
39 u32 xTypeNum; // Card Type/CCIN number x10-x13
40 u32 xModelNum; // Model/Feature number x14-x17
41 u64 xSerialNum; // Serial number x18-x1F
42 char xPartNum[12]; // Book Part or FPU number x20-x2B
43 char xMfgID[4]; // Manufacturing ID x2C-x2F
44
45 u32 xProcFreq; // Processor Frequency x30-x33
46 u32 xTimeBaseFreq; // Time Base Frequency x34-x37
47
48 u32 xChipEcLevel; // Chip EC Levels x38-x3B
49 u32 xProcIdReg; // PIR SPR value x3C-x3F
50 u32 xPVR; // PVR value x40-x43
51 u8 xRsvd3[12]; // Reserved x44-x4F
52
53 u32 xInstCacheSize; // Instruction cache size in KB x50-x53
54 u32 xInstBlockSize; // Instruction cache block size x54-x57
55 u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
56 u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
57
58 u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
59 u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
60 u64 xRsvd4; // Reserved x68-x6F
61
62 u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
63 u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
64 u64 xRsvd5; // Reserved x78-x7F
65
66 u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
67 u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
68 u64 xRsvd6; // Reserved x88-x8F
69
70 u64 xFruLabel; // Card Location Label x90-x97
71 u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
72 u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
73 u16 xSlotMapIndex; // Index in slot map table x9A-x9B
74 u8 xSmartCardPortNo; // Smart card port number x9C-x9C
75 u8 xRsvd7; // Reserved x9D-x9D
76 u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
77
78 u8 xRsvd8[24]; // Reserved xA0-xB7
79
80 char xProcSrc[72]; // CSP format SRC xB8-xFF
81};
82
83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
84
85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h
new file mode 100644
index 000000000000..c68b9c3e5caf
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/release_data.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_RELEASE_DATA_H
19#define _ISERIES_RELEASE_DATA_H
20
21/*
22 * This control block contains the critical information about the
23 * release so that it can be changed in the future (ie, the virtual
24 * address of the OS's NACA).
25 */
26#include <asm/types.h>
27#include <asm/naca.h>
28
29/*
30 * When we IPL a secondary partition, we will check if if the
31 * secondary xMinPlicVrmIndex > the primary xVrmIndex.
32 * If it is then this tells PLIC that this secondary is not
33 * supported running on this "old" of a level of PLIC.
34 *
35 * Likewise, we will compare the primary xMinSlicVrmIndex to
36 * the secondary xVrmIndex.
37 * If the primary xMinSlicVrmDelta > secondary xVrmDelta then we
38 * know that this PLIC does not support running an OS "that old".
39 */
40
41#define HVREL_TAGSINACTIVE 0x8000
42#define HVREL_32BIT 0x4000
43#define HVREL_NOSHAREDPROCS 0x2000
44#define HVREL_NOHMT 0x1000
45
46struct HvReleaseData {
47 u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
48 u16 xSize; /* Size of this control block x04-x05 */
49 u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */
50 struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
51 u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
52 u32 xRsvd1; /* Reserved x14-x17 */
53 u16 xFlags;
54 u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
55 u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
56 u16 xMinCompatablePlicVrmIndex; /* Min PLIC levelP (hard) x1E-x1F */
57 char xVrmName[12]; /* Displayable name x20-x2B */
58 char xRsvd3[20]; /* Reserved x2C-x3F */
59};
60
61extern struct HvReleaseData hvReleaseData;
62
63#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/powerpc/platforms/iseries/setup.c
index 3ffefbbc6623..1544c6f10a38 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: iSeries_setup.c
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and 7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
@@ -42,26 +40,27 @@
42#include <asm/firmware.h> 40#include <asm/firmware.h>
43 41
44#include <asm/time.h> 42#include <asm/time.h>
45#include "iSeries_setup.h"
46#include <asm/naca.h> 43#include <asm/naca.h>
47#include <asm/paca.h> 44#include <asm/paca.h>
48#include <asm/cache.h> 45#include <asm/cache.h>
49#include <asm/sections.h> 46#include <asm/sections.h>
50#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/iSeries/HvLpConfig.h> 48#include <asm/iSeries/HvLpConfig.h>
53#include <asm/iSeries/HvCallEvent.h> 49#include <asm/iSeries/HvCallEvent.h>
54#include <asm/iSeries/HvCallSm.h>
55#include <asm/iSeries/HvCallXm.h> 50#include <asm/iSeries/HvCallXm.h>
56#include <asm/iSeries/ItLpQueue.h> 51#include <asm/iSeries/ItLpQueue.h>
57#include <asm/iSeries/IoHriMainStore.h>
58#include <asm/iSeries/mf.h> 52#include <asm/iSeries/mf.h>
59#include <asm/iSeries/HvLpEvent.h> 53#include <asm/iSeries/HvLpEvent.h>
60#include <asm/iSeries/iSeries_irq.h>
61#include <asm/iSeries/IoHriProcessorVpd.h>
62#include <asm/iSeries/ItVpdAreas.h>
63#include <asm/iSeries/LparMap.h> 54#include <asm/iSeries/LparMap.h>
64 55
56#include "setup.h"
57#include "irq.h"
58#include "vpd_areas.h"
59#include "processor_vpd.h"
60#include "main_store.h"
61#include "call_sm.h"
62#include "call_hpt.h"
63
65extern void hvlog(char *fmt, ...); 64extern void hvlog(char *fmt, ...);
66 65
67#ifdef DEBUG 66#ifdef DEBUG
@@ -74,8 +73,8 @@ extern void hvlog(char *fmt, ...);
74extern void ppcdbg_initialize(void); 73extern void ppcdbg_initialize(void);
75 74
76static void build_iSeries_Memory_Map(void); 75static void build_iSeries_Memory_Map(void);
77static void setup_iSeries_cache_sizes(void); 76static void iseries_shared_idle(void);
78static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr); 77static void iseries_dedicated_idle(void);
79#ifdef CONFIG_PCI 78#ifdef CONFIG_PCI
80extern void iSeries_pci_final_fixup(void); 79extern void iSeries_pci_final_fixup(void);
81#else 80#else
@@ -83,14 +82,6 @@ static void iSeries_pci_final_fixup(void) { }
83#endif 82#endif
84 83
85/* Global Variables */ 84/* Global Variables */
86static unsigned long procFreqHz;
87static unsigned long procFreqMhz;
88static unsigned long procFreqMhzHundreths;
89
90static unsigned long tbFreqHz;
91static unsigned long tbFreqMhz;
92static unsigned long tbFreqMhzHundreths;
93
94int piranha_simulator; 85int piranha_simulator;
95 86
96extern int rd_size; /* Defined in drivers/block/rd.c */ 87extern int rd_size; /* Defined in drivers/block/rd.c */
@@ -311,14 +302,14 @@ static void __init iSeries_get_cmdline(void)
311 302
312static void __init iSeries_init_early(void) 303static void __init iSeries_init_early(void)
313{ 304{
314 extern unsigned long memory_limit;
315
316 DBG(" -> iSeries_init_early()\n"); 305 DBG(" -> iSeries_init_early()\n");
317 306
318 ppc64_firmware_features = FW_FEATURE_ISERIES; 307 ppc64_firmware_features = FW_FEATURE_ISERIES;
319 308
320 ppcdbg_initialize(); 309 ppcdbg_initialize();
321 310
311 ppc64_interrupt_controller = IC_ISERIES;
312
322#if defined(CONFIG_BLK_DEV_INITRD) 313#if defined(CONFIG_BLK_DEV_INITRD)
323 /* 314 /*
324 * If the init RAM disk has been configured and there is 315 * If the init RAM disk has been configured and there is
@@ -341,12 +332,6 @@ static void __init iSeries_init_early(void)
341 iSeries_recal_titan = HvCallXm_loadTod(); 332 iSeries_recal_titan = HvCallXm_loadTod();
342 333
343 /* 334 /*
344 * Cache sizes must be initialized before hpte_init_iSeries is called
345 * as the later need them for flush_icache_range()
346 */
347 setup_iSeries_cache_sizes();
348
349 /*
350 * Initialize the hash table management pointers 335 * Initialize the hash table management pointers
351 */ 336 */
352 hpte_init_iSeries(); 337 hpte_init_iSeries();
@@ -356,12 +341,6 @@ static void __init iSeries_init_early(void)
356 */ 341 */
357 iommu_init_early_iSeries(); 342 iommu_init_early_iSeries();
358 343
359 /*
360 * Initialize the table which translate Linux physical addresses to
361 * AS/400 absolute addresses
362 */
363 build_iSeries_Memory_Map();
364
365 iSeries_get_cmdline(); 344 iSeries_get_cmdline();
366 345
367 /* Save unparsed command line copy for /proc/cmdline */ 346 /* Save unparsed command line copy for /proc/cmdline */
@@ -379,14 +358,6 @@ static void __init iSeries_init_early(void)
379 } 358 }
380 } 359 }
381 360
382 /* Bolt kernel mappings for all of memory (or just a bit if we've got a limit) */
383 iSeries_bolt_kernel(0, systemcfg->physicalMemorySize);
384
385 lmb_init();
386 lmb_add(0, systemcfg->physicalMemorySize);
387 lmb_analyze();
388 lmb_reserve(0, __pa(klimit));
389
390 /* Initialize machine-dependency vectors */ 361 /* Initialize machine-dependency vectors */
391#ifdef CONFIG_SMP 362#ifdef CONFIG_SMP
392 smp_init_iSeries(); 363 smp_init_iSeries();
@@ -457,7 +428,6 @@ static void __init build_iSeries_Memory_Map(void)
457 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize; 428 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
458 u32 nextPhysChunk; 429 u32 nextPhysChunk;
459 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages; 430 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
460 u32 num_ptegs;
461 u32 totalChunks,moreChunks; 431 u32 totalChunks,moreChunks;
462 u32 currChunk, thisChunk, absChunk; 432 u32 currChunk, thisChunk, absChunk;
463 u32 currDword; 433 u32 currDword;
@@ -520,10 +490,7 @@ static void __init build_iSeries_Memory_Map(void)
520 printk("HPT absolute addr = %016lx, size = %dK\n", 490 printk("HPT absolute addr = %016lx, size = %dK\n",
521 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); 491 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
522 492
523 /* Fill in the hashed page table hash mask */ 493 ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
524 num_ptegs = hptSizePages *
525 (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
526 htab_hash_mask = num_ptegs - 1;
527 494
528 /* 495 /*
529 * The actual hashed page table is in the hypervisor, 496 * The actual hashed page table is in the hypervisor,
@@ -592,144 +559,33 @@ static void __init build_iSeries_Memory_Map(void)
592} 559}
593 560
594/* 561/*
595 * Set up the variables that describe the cache line sizes
596 * for this machine.
597 */
598static void __init setup_iSeries_cache_sizes(void)
599{
600 unsigned int i, n;
601 unsigned int procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
602
603 systemcfg->icache_size =
604 ppc64_caches.isize = xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
605 systemcfg->icache_line_size =
606 ppc64_caches.iline_size =
607 xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
608 systemcfg->dcache_size =
609 ppc64_caches.dsize =
610 xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
611 systemcfg->dcache_line_size =
612 ppc64_caches.dline_size =
613 xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
614 ppc64_caches.ilines_per_page = PAGE_SIZE / ppc64_caches.iline_size;
615 ppc64_caches.dlines_per_page = PAGE_SIZE / ppc64_caches.dline_size;
616
617 i = ppc64_caches.iline_size;
618 n = 0;
619 while ((i = (i / 2)))
620 ++n;
621 ppc64_caches.log_iline_size = n;
622
623 i = ppc64_caches.dline_size;
624 n = 0;
625 while ((i = (i / 2)))
626 ++n;
627 ppc64_caches.log_dline_size = n;
628
629 printk("D-cache line size = %d\n",
630 (unsigned int)ppc64_caches.dline_size);
631 printk("I-cache line size = %d\n",
632 (unsigned int)ppc64_caches.iline_size);
633}
634
635/*
636 * Create a pte. Used during initialization only.
637 */
638static void iSeries_make_pte(unsigned long va, unsigned long pa,
639 int mode)
640{
641 hpte_t local_hpte, rhpte;
642 unsigned long hash, vpn;
643 long slot;
644
645 vpn = va >> PAGE_SHIFT;
646 hash = hpt_hash(vpn, 0);
647
648 local_hpte.r = pa | mode;
649 local_hpte.v = ((va >> 23) << HPTE_V_AVPN_SHIFT)
650 | HPTE_V_BOLTED | HPTE_V_VALID;
651
652 slot = HvCallHpt_findValid(&rhpte, vpn);
653 if (slot < 0) {
654 /* Must find space in primary group */
655 panic("hash_page: hpte already exists\n");
656 }
657 HvCallHpt_addValidate(slot, 0, &local_hpte);
658}
659
660/*
661 * Bolt the kernel addr space into the HPT
662 */
663static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
664{
665 unsigned long pa;
666 unsigned long mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
667 hpte_t hpte;
668
669 for (pa = saddr; pa < eaddr ;pa += PAGE_SIZE) {
670 unsigned long ea = (unsigned long)__va(pa);
671 unsigned long vsid = get_kernel_vsid(ea);
672 unsigned long va = (vsid << 28) | (pa & 0xfffffff);
673 unsigned long vpn = va >> PAGE_SHIFT;
674 unsigned long slot = HvCallHpt_findValid(&hpte, vpn);
675
676 /* Make non-kernel text non-executable */
677 if (!in_kernel_text(ea))
678 mode_rw |= HW_NO_EXEC;
679
680 if (hpte.v & HPTE_V_VALID) {
681 /* HPTE exists, so just bolt it */
682 HvCallHpt_setSwBits(slot, 0x10, 0);
683 /* And make sure the pp bits are correct */
684 HvCallHpt_setPp(slot, PP_RWXX);
685 } else
686 /* No HPTE exists, so create a new bolted one */
687 iSeries_make_pte(va, phys_to_abs(pa), mode_rw);
688 }
689}
690
691/*
692 * Document me. 562 * Document me.
693 */ 563 */
694static void __init iSeries_setup_arch(void) 564static void __init iSeries_setup_arch(void)
695{ 565{
696 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; 566 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
697 567
698 /* Add an eye catcher and the systemcfg layout version number */ 568 if (get_paca()->lppaca.shared_proc) {
699 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 569 ppc_md.idle_loop = iseries_shared_idle;
700 systemcfg->version.major = SYSTEMCFG_MAJOR; 570 printk(KERN_INFO "Using shared processor idle loop\n");
701 systemcfg->version.minor = SYSTEMCFG_MINOR; 571 } else {
572 ppc_md.idle_loop = iseries_dedicated_idle;
573 printk(KERN_INFO "Using dedicated idle loop\n");
574 }
702 575
703 /* Setup the Lp Event Queue */ 576 /* Setup the Lp Event Queue */
704 setup_hvlpevent_queue(); 577 setup_hvlpevent_queue();
705 578
706 /* Compute processor frequency */
707 procFreqHz = ((1UL << 34) * 1000000) /
708 xIoHriProcessorVpd[procIx].xProcFreq;
709 procFreqMhz = procFreqHz / 1000000;
710 procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
711 ppc_proc_freq = procFreqHz;
712
713 /* Compute time base frequency */
714 tbFreqHz = ((1UL << 32) * 1000000) /
715 xIoHriProcessorVpd[procIx].xTimeBaseFreq;
716 tbFreqMhz = tbFreqHz / 1000000;
717 tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
718 ppc_tb_freq = tbFreqHz;
719
720 printk("Max logical processors = %d\n", 579 printk("Max logical processors = %d\n",
721 itVpdAreas.xSlicMaxLogicalProcs); 580 itVpdAreas.xSlicMaxLogicalProcs);
722 printk("Max physical processors = %d\n", 581 printk("Max physical processors = %d\n",
723 itVpdAreas.xSlicMaxPhysicalProcs); 582 itVpdAreas.xSlicMaxPhysicalProcs);
724 printk("Processor frequency = %lu.%02lu\n", procFreqMhz, 583
725 procFreqMhzHundreths);
726 printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
727 tbFreqMhzHundreths);
728 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; 584 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
729 printk("Processor version = %x\n", systemcfg->processor); 585 printk("Processor version = %x\n", systemcfg->processor);
730} 586}
731 587
732static void iSeries_get_cpuinfo(struct seq_file *m) 588static void iSeries_show_cpuinfo(struct seq_file *m)
733{ 589{
734 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n"); 590 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
735} 591}
@@ -768,49 +624,6 @@ static void iSeries_halt(void)
768 mf_power_off(); 624 mf_power_off();
769} 625}
770 626
771/*
772 * void __init iSeries_calibrate_decr()
773 *
774 * Description:
775 * This routine retrieves the internal processor frequency from the VPD,
776 * and sets up the kernel timer decrementer based on that value.
777 *
778 */
779static void __init iSeries_calibrate_decr(void)
780{
781 unsigned long cyclesPerUsec;
782 struct div_result divres;
783
784 /* Compute decrementer (and TB) frequency in cycles/sec */
785 cyclesPerUsec = ppc_tb_freq / 1000000;
786
787 /*
788 * Set the amount to refresh the decrementer by. This
789 * is the number of decrementer ticks it takes for
790 * 1/HZ seconds.
791 */
792 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
793
794#if 0
795 /* TEST CODE FOR ADJTIME */
796 tb_ticks_per_jiffy += tb_ticks_per_jiffy / 5000;
797 /* END OF TEST CODE */
798#endif
799
800 /*
801 * tb_ticks_per_sec = freq; would give better accuracy
802 * but tb_ticks_per_sec = tb_ticks_per_jiffy*HZ; assures
803 * that jiffies (and xtime) will match the time returned
804 * by do_gettimeofday.
805 */
806 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
807 tb_ticks_per_usec = cyclesPerUsec;
808 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
809 div128_by_32(1024 * 1024, 0, tb_ticks_per_sec, &divres);
810 tb_to_xs = divres.result_low;
811 setup_default_decr();
812}
813
814static void __init iSeries_progress(char * st, unsigned short code) 627static void __init iSeries_progress(char * st, unsigned short code)
815{ 628{
816 printk("Progress: [%04x] - %s\n", (unsigned)code, st); 629 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
@@ -878,7 +691,7 @@ static void yield_shared_processor(void)
878 process_iSeries_events(); 691 process_iSeries_events();
879} 692}
880 693
881static int iseries_shared_idle(void) 694static void iseries_shared_idle(void)
882{ 695{
883 while (1) { 696 while (1) {
884 while (!need_resched() && !hvlpevent_is_pending()) { 697 while (!need_resched() && !hvlpevent_is_pending()) {
@@ -900,11 +713,9 @@ static int iseries_shared_idle(void)
900 713
901 schedule(); 714 schedule();
902 } 715 }
903
904 return 0;
905} 716}
906 717
907static int iseries_dedicated_idle(void) 718static void iseries_dedicated_idle(void)
908{ 719{
909 long oldval; 720 long oldval;
910 721
@@ -934,44 +745,252 @@ static int iseries_dedicated_idle(void)
934 ppc64_runlatch_on(); 745 ppc64_runlatch_on();
935 schedule(); 746 schedule();
936 } 747 }
937
938 return 0;
939} 748}
940 749
941#ifndef CONFIG_PCI 750#ifndef CONFIG_PCI
942void __init iSeries_init_IRQ(void) { } 751void __init iSeries_init_IRQ(void) { }
943#endif 752#endif
944 753
945void __init iSeries_early_setup(void) 754static int __init iseries_probe(int platform)
946{ 755{
947 iSeries_fixup_klimit(); 756 return PLATFORM_ISERIES_LPAR == platform;
757}
948 758
949 ppc_md.setup_arch = iSeries_setup_arch; 759struct machdep_calls __initdata iseries_md = {
950 ppc_md.get_cpuinfo = iSeries_get_cpuinfo; 760 .setup_arch = iSeries_setup_arch,
951 ppc_md.init_IRQ = iSeries_init_IRQ; 761 .show_cpuinfo = iSeries_show_cpuinfo,
952 ppc_md.get_irq = iSeries_get_irq; 762 .init_IRQ = iSeries_init_IRQ,
953 ppc_md.init_early = iSeries_init_early, 763 .get_irq = iSeries_get_irq,
764 .init_early = iSeries_init_early,
765 .pcibios_fixup = iSeries_pci_final_fixup,
766 .restart = iSeries_restart,
767 .power_off = iSeries_power_off,
768 .halt = iSeries_halt,
769 .get_boot_time = iSeries_get_boot_time,
770 .set_rtc_time = iSeries_set_rtc_time,
771 .get_rtc_time = iSeries_get_rtc_time,
772 .calibrate_decr = generic_calibrate_decr,
773 .progress = iSeries_progress,
774 .probe = iseries_probe,
775 /* XXX Implement enable_pmcs for iSeries */
776};
954 777
955 ppc_md.pcibios_fixup = iSeries_pci_final_fixup; 778struct blob {
779 unsigned char data[PAGE_SIZE];
780 unsigned long next;
781};
956 782
957 ppc_md.restart = iSeries_restart; 783struct iseries_flat_dt {
958 ppc_md.power_off = iSeries_power_off; 784 struct boot_param_header header;
959 ppc_md.halt = iSeries_halt; 785 u64 reserve_map[2];
786 struct blob dt;
787 struct blob strings;
788};
960 789
961 ppc_md.get_boot_time = iSeries_get_boot_time; 790struct iseries_flat_dt iseries_dt;
962 ppc_md.set_rtc_time = iSeries_set_rtc_time;
963 ppc_md.get_rtc_time = iSeries_get_rtc_time;
964 ppc_md.calibrate_decr = iSeries_calibrate_decr;
965 ppc_md.progress = iSeries_progress;
966 791
967 /* XXX Implement enable_pmcs for iSeries */ 792void dt_init(struct iseries_flat_dt *dt)
793{
794 dt->header.off_mem_rsvmap =
795 offsetof(struct iseries_flat_dt, reserve_map);
796 dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
797 dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
798 dt->header.totalsize = sizeof(struct iseries_flat_dt);
799 dt->header.dt_strings_size = sizeof(struct blob);
968 800
969 if (get_paca()->lppaca.shared_proc) { 801 /* There is no notion of hardware cpu id on iSeries */
970 ppc_md.idle_loop = iseries_shared_idle; 802 dt->header.boot_cpuid_phys = smp_processor_id();
971 printk(KERN_INFO "Using shared processor idle loop\n"); 803
972 } else { 804 dt->dt.next = (unsigned long)&dt->dt.data;
973 ppc_md.idle_loop = iseries_dedicated_idle; 805 dt->strings.next = (unsigned long)&dt->strings.data;
974 printk(KERN_INFO "Using dedicated idle loop\n"); 806
807 dt->header.magic = OF_DT_HEADER;
808 dt->header.version = 0x10;
809 dt->header.last_comp_version = 0x10;
810
811 dt->reserve_map[0] = 0;
812 dt->reserve_map[1] = 0;
813}
814
815void dt_check_blob(struct blob *b)
816{
817 if (b->next >= (unsigned long)&b->next) {
818 DBG("Ran out of space in flat device tree blob!\n");
819 BUG();
975 } 820 }
976} 821}
977 822
823void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
824{
825 *((u32*)dt->dt.next) = value;
826 dt->dt.next += sizeof(u32);
827
828 dt_check_blob(&dt->dt);
829}
830
831void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
832{
833 *((u64*)dt->dt.next) = value;
834 dt->dt.next += sizeof(u64);
835
836 dt_check_blob(&dt->dt);
837}
838
839unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
840{
841 unsigned long start = blob->next - (unsigned long)blob->data;
842
843 memcpy((char *)blob->next, data, len);
844 blob->next = _ALIGN(blob->next + len, 4);
845
846 dt_check_blob(blob);
847
848 return start;
849}
850
851void dt_start_node(struct iseries_flat_dt *dt, char *name)
852{
853 dt_push_u32(dt, OF_DT_BEGIN_NODE);
854 dt_push_bytes(&dt->dt, name, strlen(name) + 1);
855}
856
857#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
858
859void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
860{
861 unsigned long offset;
862
863 dt_push_u32(dt, OF_DT_PROP);
864
865 /* Length of the data */
866 dt_push_u32(dt, len);
867
868 /* Put the property name in the string blob. */
869 offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
870
871 /* The offset of the properties name in the string blob. */
872 dt_push_u32(dt, (u32)offset);
873
874 /* The actual data. */
875 dt_push_bytes(&dt->dt, data, len);
876}
877
878void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
879{
880 dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
881}
882
883void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
884{
885 dt_prop(dt, name, (char *)&data, sizeof(u32));
886}
887
888void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
889{
890 dt_prop(dt, name, (char *)&data, sizeof(u64));
891}
892
893void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
894{
895 dt_prop(dt, name, (char *)data, sizeof(u64) * n);
896}
897
898void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
899{
900 dt_prop(dt, name, NULL, 0);
901}
902
903void dt_cpus(struct iseries_flat_dt *dt)
904{
905 unsigned char buf[32];
906 unsigned char *p;
907 unsigned int i, index;
908 struct IoHriProcessorVpd *d;
909
910 /* yuck */
911 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
912 p = strchr(buf, ' ');
913 if (!p) p = buf + strlen(buf);
914
915 dt_start_node(dt, "cpus");
916 dt_prop_u32(dt, "#address-cells", 1);
917 dt_prop_u32(dt, "#size-cells", 0);
918
919 for (i = 0; i < NR_CPUS; i++) {
920 if (paca[i].lppaca.dyn_proc_status >= 2)
921 continue;
922
923 snprintf(p, 32 - (p - buf), "@%d", i);
924 dt_start_node(dt, buf);
925
926 dt_prop_str(dt, "device_type", "cpu");
927
928 index = paca[i].lppaca.dyn_hv_phys_proc_index;
929 d = &xIoHriProcessorVpd[index];
930
931 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
932 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
933
934 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
935 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
936
937 /* magic conversions to Hz copied from old code */
938 dt_prop_u32(dt, "clock-frequency",
939 ((1UL << 34) * 1000000) / d->xProcFreq);
940 dt_prop_u32(dt, "timebase-frequency",
941 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
942
943 dt_prop_u32(dt, "reg", i);
944
945 dt_end_node(dt);
946 }
947
948 dt_end_node(dt);
949}
950
951void build_flat_dt(struct iseries_flat_dt *dt)
952{
953 u64 tmp[2];
954
955 dt_init(dt);
956
957 dt_start_node(dt, "");
958
959 dt_prop_u32(dt, "#address-cells", 2);
960 dt_prop_u32(dt, "#size-cells", 2);
961
962 /* /memory */
963 dt_start_node(dt, "memory@0");
964 dt_prop_str(dt, "name", "memory");
965 dt_prop_str(dt, "device_type", "memory");
966 tmp[0] = 0;
967 tmp[1] = systemcfg->physicalMemorySize;
968 dt_prop_u64_list(dt, "reg", tmp, 2);
969 dt_end_node(dt);
970
971 /* /chosen */
972 dt_start_node(dt, "chosen");
973 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
974 dt_end_node(dt);
975
976 dt_cpus(dt);
977
978 dt_end_node(dt);
979
980 dt_push_u32(dt, OF_DT_END);
981}
982
983void * __init iSeries_early_setup(void)
984{
985 iSeries_fixup_klimit();
986
987 /*
988 * Initialize the table which translate Linux physical addresses to
989 * AS/400 absolute addresses
990 */
991 build_iSeries_Memory_Map();
992
993 build_flat_dt(&iseries_dt);
994
995 return (void *) __pa(&iseries_dt);
996}
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/powerpc/platforms/iseries/setup.h
index c6eb29a245ac..5213044ec411 100644
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -2,8 +2,6 @@
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> 2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> 3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 * 4 *
5 * Module name: as400_setup.h
6 *
7 * Description: 5 * Description:
8 * Architecture- / platform-specific boot-time initialization code for 6 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and 7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
@@ -19,7 +17,7 @@
19#ifndef __ISERIES_SETUP_H__ 17#ifndef __ISERIES_SETUP_H__
20#define __ISERIES_SETUP_H__ 18#define __ISERIES_SETUP_H__
21 19
22extern void iSeries_get_boot_time(struct rtc_time *tm); 20extern unsigned long iSeries_get_boot_time(void);
23extern int iSeries_set_rtc_time(struct rtc_time *tm); 21extern int iSeries_set_rtc_time(struct rtc_time *tm);
24extern void iSeries_get_rtc_time(struct rtc_time *tm); 22extern void iSeries_get_rtc_time(struct rtc_time *tm);
25 23
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/powerpc/platforms/iseries/smp.c
index f74386e31638..f720916682f6 100644
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -47,17 +47,17 @@
47 47
48static unsigned long iSeries_smp_message[NR_CPUS]; 48static unsigned long iSeries_smp_message[NR_CPUS];
49 49
50void iSeries_smp_message_recv( struct pt_regs * regs ) 50void iSeries_smp_message_recv(struct pt_regs *regs)
51{ 51{
52 int cpu = smp_processor_id(); 52 int cpu = smp_processor_id();
53 int msg; 53 int msg;
54 54
55 if ( num_online_cpus() < 2 ) 55 if (num_online_cpus() < 2)
56 return; 56 return;
57 57
58 for ( msg = 0; msg < 4; ++msg ) 58 for (msg = 0; msg < 4; msg++)
59 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) ) 59 if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
60 smp_message_recv( msg, regs ); 60 smp_message_recv(msg, regs);
61} 61}
62 62
63static inline void smp_iSeries_do_message(int cpu, int msg) 63static inline void smp_iSeries_do_message(int cpu, int msg)
@@ -74,48 +74,22 @@ static void smp_iSeries_message_pass(int target, int msg)
74 smp_iSeries_do_message(target, msg); 74 smp_iSeries_do_message(target, msg);
75 else { 75 else {
76 for_each_online_cpu(i) { 76 for_each_online_cpu(i) {
77 if (target == MSG_ALL_BUT_SELF 77 if ((target == MSG_ALL_BUT_SELF) &&
78 && i == smp_processor_id()) 78 (i == smp_processor_id()))
79 continue; 79 continue;
80 smp_iSeries_do_message(i, msg); 80 smp_iSeries_do_message(i, msg);
81 } 81 }
82 } 82 }
83} 83}
84 84
85static int smp_iSeries_numProcs(void)
86{
87 unsigned np, i;
88
89 np = 0;
90 for (i=0; i < NR_CPUS; ++i) {
91 if (paca[i].lppaca.dyn_proc_status < 2) {
92 cpu_set(i, cpu_possible_map);
93 cpu_set(i, cpu_present_map);
94 cpu_set(i, cpu_sibling_map[i]);
95 ++np;
96 }
97 }
98 return np;
99}
100
101static int smp_iSeries_probe(void) 85static int smp_iSeries_probe(void)
102{ 86{
103 unsigned i; 87 return cpus_weight(cpu_possible_map);
104 unsigned np = 0;
105
106 for (i=0; i < NR_CPUS; ++i) {
107 if (paca[i].lppaca.dyn_proc_status < 2) {
108 /*paca[i].active = 1;*/
109 ++np;
110 }
111 }
112
113 return np;
114} 88}
115 89
116static void smp_iSeries_kick_cpu(int nr) 90static void smp_iSeries_kick_cpu(int nr)
117{ 91{
118 BUG_ON(nr < 0 || nr >= NR_CPUS); 92 BUG_ON((nr < 0) || (nr >= NR_CPUS));
119 93
120 /* Verify that our partition has a processor nr */ 94 /* Verify that our partition has a processor nr */
121 if (paca[nr].lppaca.dyn_proc_status >= 2) 95 if (paca[nr].lppaca.dyn_proc_status >= 2)
@@ -144,6 +118,4 @@ static struct smp_ops_t iSeries_smp_ops = {
144void __init smp_init_iSeries(void) 118void __init smp_init_iSeries(void)
145{ 119{
146 smp_ops = &iSeries_smp_ops; 120 smp_ops = &iSeries_smp_ops;
147 systemcfg->processorCount = smp_iSeries_numProcs();
148} 121}
149
diff --git a/arch/powerpc/platforms/iseries/spcomm_area.h b/arch/powerpc/platforms/iseries/spcomm_area.h
new file mode 100644
index 000000000000..6e3b685115c9
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/spcomm_area.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_SPCOMM_AREA_H
20#define _ISERIES_SPCOMM_AREA_H
21
22
23struct SpCommArea {
24 u32 xDesc; // Descriptor (only in new formats) 000-003
25 u8 xFormat; // Format (only in new formats) 004-004
26 u8 xRsvd1[11]; // Reserved 005-00F
27 u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
28 u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
29 u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
30 u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
31 u8 xRsvd2[80]; // Reserved 030-07F
32};
33
34extern struct SpCommArea xSpCommArea;
35
36#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/powerpc/platforms/iseries/vio.c
index 6b754b0c8344..c0f7d2e9153f 100644
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -14,6 +14,7 @@
14 14
15#include <asm/vio.h> 15#include <asm/vio.h>
16#include <asm/iommu.h> 16#include <asm/iommu.h>
17#include <asm/tce.h>
17#include <asm/abs_addr.h> 18#include <asm/abs_addr.h>
18#include <asm/page.h> 19#include <asm/page.h>
19#include <asm/iSeries/vio.h> 20#include <asm/iSeries/vio.h>
diff --git a/arch/ppc64/kernel/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 2a6c4f01c45e..c0c767bd37f1 100644
--- a/arch/ppc64/kernel/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -1,5 +1,4 @@
1/* -*- linux-c -*- 1/* -*- linux-c -*-
2 * arch/ppc64/kernel/viopath.c
3 * 2 *
4 * iSeries Virtual I/O Message Path code 3 * iSeries Virtual I/O Message Path code
5 * 4 *
@@ -7,7 +6,7 @@
7 * Ryan Arnold <ryanarn@us.ibm.com> 6 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com> 7 * Colin Devilbiss <devilbis@us.ibm.com>
9 * 8 *
10 * (C) Copyright 2000-2003 IBM Corporation 9 * (C) Copyright 2000-2005 IBM Corporation
11 * 10 *
12 * This code is used by the iSeries virtual disk, cd, 11 * This code is used by the iSeries virtual disk, cd,
13 * tape, and console to communicate with OS/400 in another 12 * tape, and console to communicate with OS/400 in another
diff --git a/arch/powerpc/platforms/iseries/vpd_areas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
new file mode 100644
index 000000000000..601e6dd860ed
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/vpd_areas.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_VPD_AREAS_H
19#define _ISERIES_VPD_AREAS_H
20
21/*
22 * This file defines the address and length of all of the VPD area passed to
23 * the OS from PLIC (most of which start from the SP).
24 */
25
26#include <asm/types.h>
27
28/* VPD Entry index is carved in stone - cannot be changed (easily). */
29#define ItVpdCecVpd 0
30#define ItVpdDynamicSpace 1
31#define ItVpdExtVpd 2
32#define ItVpdExtVpdOnPanel 3
33#define ItVpdFirstPaca 4
34#define ItVpdIoVpd 5
35#define ItVpdIplParms 6
36#define ItVpdMsVpd 7
37#define ItVpdPanelVpd 8
38#define ItVpdLpNaca 9
39#define ItVpdBackplaneAndMaybeClockCardVpd 10
40#define ItVpdRecoveryLogBuffer 11
41#define ItVpdSpCommArea 12
42#define ItVpdSpLogBuffer 13
43#define ItVpdSpLogBufferSave 14
44#define ItVpdSpCardVpd 15
45#define ItVpdFirstProcVpd 16
46#define ItVpdApModelVpd 17
47#define ItVpdClockCardVpd 18
48#define ItVpdBusExtCardVpd 19
49#define ItVpdProcCapacityVpd 20
50#define ItVpdInteractiveCapacityVpd 21
51#define ItVpdFirstSlotLabel 22
52#define ItVpdFirstLpQueue 23
53#define ItVpdFirstL3CacheVpd 24
54#define ItVpdFirstProcFruVpd 25
55
56#define ItVpdMaxEntries 26
57
58#define ItDmaMaxEntries 10
59
60#define ItVpdAreasMaxSlotLabels 192
61
62
63struct ItVpdAreas {
64 u32 xSlicDesc; // Descriptor 000-003
65 u16 xSlicSize; // Size of this control block 004-005
66 u16 xPlicAdjustVpdLens:1; // Flag to indicate new interface006-007
67 u16 xRsvd1:15; // Reserved bits ...
68 u16 xSlicVpdEntries; // Number of VPD entries 008-009
69 u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
70 u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
71 u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
72 u16 xSlicDmaToksOffset; // Offset into this of array 010-011
73 u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
74 u16 xSlicDmaLensOffset; // Offset into this of array 014-015
75 u16 xSlicVpdLensOffset; // Offset into this of array 016-017
76 u16 xSlicMaxSlotLabels; // Maximum number of slot labels018-019
77 u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
78 u8 xRsvd2[4]; // Reserved 01C-01F
79 u64 xRsvd3[12]; // Reserved 020-07F
80 u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
81 u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
82 u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
83 void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
84};
85
86extern struct ItVpdAreas itVpdAreas;
87
88#endif /* _ISERIES_VPD_AREAS_H */
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c
index 5d921792571f..9c318849dee7 100644
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ b/arch/powerpc/platforms/iseries/vpdinfo.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001.
3 *
4 * This code gets the card location of the hardware 2 * This code gets the card location of the hardware
5 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp> 3 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp>
6 * Copyright (C) 2005 Stephen Rothwel, IBM Corp 4 * Copyright (C) 2005 Stephen Rothwel, IBM Corp
@@ -29,12 +27,15 @@
29#include <linux/init.h> 27#include <linux/init.h>
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/pci.h> 29#include <linux/pci.h>
30
32#include <asm/types.h> 31#include <asm/types.h>
33#include <asm/resource.h> 32#include <asm/resource.h>
34 33#include <asm/abs_addr.h>
35#include <asm/iSeries/HvCallPci.h> 34#include <asm/pci-bridge.h>
36#include <asm/iSeries/HvTypes.h> 35#include <asm/iSeries/HvTypes.h>
37#include <asm/iSeries/iSeries_pci.h> 36
37#include "pci.h"
38#include "call_pci.h"
38 39
39/* 40/*
40 * Size of Bus VPD data 41 * Size of Bus VPD data
@@ -214,7 +215,7 @@ static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
214 printk("PCI: Bus VPD Buffer allocation failure.\n"); 215 printk("PCI: Bus VPD Buffer allocation failure.\n");
215 return; 216 return;
216 } 217 }
217 BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr), 218 BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr),
218 BUS_VPDSIZE); 219 BUS_VPDSIZE);
219 if (BusVpdLen == 0) { 220 if (BusVpdLen == 0) {
220 printk("PCI: Bus VPD Buffer zero length.\n"); 221 printk("PCI: Bus VPD Buffer zero length.\n");
@@ -242,7 +243,8 @@ out_free:
242 */ 243 */
243void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) 244void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
244{ 245{
245 struct iSeries_Device_Node *DevNode = PciDev->sysdata; 246 struct device_node *DevNode = PciDev->sysdata;
247 struct pci_dn *pdn;
246 u16 bus; 248 u16 bus;
247 u8 frame; 249 u8 frame;
248 char card[4]; 250 char card[4];
@@ -255,8 +257,9 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
255 return; 257 return;
256 } 258 }
257 259
258 bus = ISERIES_BUS(DevNode); 260 pdn = PCI_DN(DevNode);
259 subbus = ISERIES_SUBBUS(DevNode); 261 bus = pdn->busno;
262 subbus = pdn->bussubno;
260 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), 263 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
261 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); 264 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
262 iSeries_Get_Location_Code(bus, agent, &frame, card); 265 iSeries_Get_Location_Code(bus, agent, &frame, card);
diff --git a/arch/powerpc/platforms/maple/Makefile b/arch/powerpc/platforms/maple/Makefile
new file mode 100644
index 000000000000..1be1a993c5f5
--- /dev/null
+++ b/arch/powerpc/platforms/maple/Makefile
@@ -0,0 +1 @@
obj-y += setup.o pci.o time.o
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
new file mode 100644
index 000000000000..0657c579b840
--- /dev/null
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -0,0 +1,12 @@
1/*
2 * Declarations for maple-specific code.
3 *
4 * Maple is the name of a PPC970 evaluation board.
5 */
6extern int maple_set_rtc_time(struct rtc_time *tm);
7extern void maple_get_rtc_time(struct rtc_time *tm);
8extern unsigned long maple_get_boot_time(void);
9extern void maple_calibrate_decr(void);
10extern void maple_pci_init(void);
11extern void maple_pcibios_fixup(void);
12extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/powerpc/platforms/maple/pci.c
index 1d297e0edfc0..340c21caeae2 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -23,8 +23,9 @@
23#include <asm/pci-bridge.h> 23#include <asm/pci-bridge.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26#include <asm/ppc-pci.h>
26 27
27#include "pci.h" 28#include "maple.h"
28 29
29#ifdef DEBUG 30#ifdef DEBUG
30#define DBG(x...) printk(x) 31#define DBG(x...) printk(x)
@@ -276,7 +277,7 @@ static void __init setup_u3_agp(struct pci_controller* hose)
276{ 277{
277 /* On G5, we move AGP up to high bus number so we don't need 278 /* On G5, we move AGP up to high bus number so we don't need
278 * to reassign bus numbers for HT. If we ever have P2P bridges 279 * to reassign bus numbers for HT. If we ever have P2P bridges
279 * on AGP, we'll have to move pci_assign_all_busses to the 280 * on AGP, we'll have to move pci_assign_all_buses to the
280 * pci_controller structure so we enable it for AGP and not for 281 * pci_controller structure so we enable it for AGP and not for
281 * HT childs. 282 * HT childs.
282 * We hard code the address because of the different size of 283 * We hard code the address because of the different size of
@@ -360,7 +361,7 @@ static int __init add_bridge(struct device_node *dev)
360 361
361 /* Interpret the "ranges" property */ 362 /* Interpret the "ranges" property */
362 /* This also maps the I/O region and sets isa_io/mem_base */ 363 /* This also maps the I/O region and sets isa_io/mem_base */
363 pci_process_bridge_OF_ranges(hose, dev); 364 pci_process_bridge_OF_ranges(hose, dev, primary);
364 pci_setup_phb_io(hose, primary); 365 pci_setup_phb_io(hose, primary);
365 366
366 /* Fixup "bus-range" OF property */ 367 /* Fixup "bus-range" OF property */
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/powerpc/platforms/maple/setup.c
index fc0567498a3a..7ece8983a105 100644
--- a/arch/ppc64/kernel/maple_setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/maple_setup.c 2 * Maple (970 eval board) setup code
3 * 3 *
4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), 4 * (c) Copyright 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
5 * IBM Corp. 5 * IBM Corp.
@@ -59,8 +59,10 @@
59#include <asm/time.h> 59#include <asm/time.h>
60#include <asm/of_device.h> 60#include <asm/of_device.h>
61#include <asm/lmb.h> 61#include <asm/lmb.h>
62#include <asm/mpic.h>
63#include <asm/udbg.h>
62 64
63#include "mpic.h" 65#include "maple.h"
64 66
65#ifdef DEBUG 67#ifdef DEBUG
66#define DBG(fmt...) udbg_printf(fmt) 68#define DBG(fmt...) udbg_printf(fmt)
@@ -68,13 +70,6 @@
68#define DBG(fmt...) 70#define DBG(fmt...)
69#endif 71#endif
70 72
71extern int maple_set_rtc_time(struct rtc_time *tm);
72extern void maple_get_rtc_time(struct rtc_time *tm);
73extern void maple_get_boot_time(struct rtc_time *tm);
74extern void maple_calibrate_decr(void);
75extern void maple_pci_init(void);
76extern void maple_pcibios_fixup(void);
77extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
78extern void generic_find_legacy_serial_ports(u64 *physport, 73extern void generic_find_legacy_serial_ports(u64 *physport,
79 unsigned int *default_speed); 74 unsigned int *default_speed);
80 75
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/powerpc/platforms/maple/time.c
index d65210abcd03..40fc07a8e606 100644
--- a/arch/ppc64/kernel/maple_time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -36,6 +36,8 @@
36#include <asm/machdep.h> 36#include <asm/machdep.h>
37#include <asm/time.h> 37#include <asm/time.h>
38 38
39#include "maple.h"
40
39#ifdef DEBUG 41#ifdef DEBUG
40#define DBG(x...) printk(x) 42#define DBG(x...) printk(x)
41#else 43#else
@@ -156,8 +158,9 @@ int maple_set_rtc_time(struct rtc_time *tm)
156 return 0; 158 return 0;
157} 159}
158 160
159void __init maple_get_boot_time(struct rtc_time *tm) 161unsigned long __init maple_get_boot_time(void)
160{ 162{
163 struct rtc_time tm;
161 struct device_node *rtcs; 164 struct device_node *rtcs;
162 165
163 rtcs = find_compatible_devices("rtc", "pnpPNP,b00"); 166 rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
@@ -170,6 +173,8 @@ void __init maple_get_boot_time(struct rtc_time *tm)
170 "legacy address (0x%x)\n", maple_rtc_addr); 173 "legacy address (0x%x)\n", maple_rtc_addr);
171 } 174 }
172 175
173 maple_get_rtc_time(tm); 176 maple_get_rtc_time(&tm);
177 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
178 tm.tm_hour, tm.tm_min, tm.tm_sec);
174} 179}
175 180
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
new file mode 100644
index 000000000000..4369676f1d54
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -0,0 +1,8 @@
1obj-y += pic.o setup.o time.o feature.o pci.o \
2 sleep.o low_i2c.o cache.o
3obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
4obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq.o
5obj-$(CONFIG_NVRAM) += nvram.o
6# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
7obj-$(CONFIG_PPC64) += nvram.o
8obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
new file mode 100644
index 000000000000..8be2f7d071f0
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -0,0 +1,202 @@
1/*
2 * Miscellaneous procedures for dealing with the PowerMac hardware.
3 * Contains support for the backlight.
4 *
5 * Copyright (C) 2000 Benjamin Herrenschmidt
6 *
7 */
8
9#include <linux/config.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/stddef.h>
13#include <linux/reboot.h>
14#include <linux/nvram.h>
15#include <linux/console.h>
16#include <asm/sections.h>
17#include <asm/ptrace.h>
18#include <asm/io.h>
19#include <asm/pgtable.h>
20#include <asm/system.h>
21#include <asm/prom.h>
22#include <asm/machdep.h>
23#include <asm/nvram.h>
24#include <asm/backlight.h>
25
26#include <linux/adb.h>
27#include <linux/pmu.h>
28
29static struct backlight_controller *backlighter;
30static void* backlighter_data;
31static int backlight_autosave;
32static int backlight_level = BACKLIGHT_MAX;
33static int backlight_enabled = 1;
34static int backlight_req_level = -1;
35static int backlight_req_enable = -1;
36
37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39
40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type)
42{
43 struct device_node* bk_node;
44 char *prop;
45 int valid = 0;
46
47 /* There's already a matching controller, bail out */
48 if (backlighter != NULL)
49 return;
50
51 bk_node = find_devices("backlight");
52
53#ifdef CONFIG_ADB_PMU
54 /* Special case for the old PowerBook since I can't test on it */
55 backlight_autosave = machine_is_compatible("AAPL,3400/2400")
56 || machine_is_compatible("AAPL,3500");
57 if ((backlight_autosave
58 || machine_is_compatible("AAPL,PowerBook1998")
59 || machine_is_compatible("PowerBook1,1"))
60 && !strcmp(type, "pmu"))
61 valid = 1;
62#endif
63 if (bk_node) {
64 prop = get_property(bk_node, "backlight-control", NULL);
65 if (prop && !strncmp(prop, type, strlen(type)))
66 valid = 1;
67 }
68 if (!valid)
69 return;
70 backlighter = ctrler;
71 backlighter_data = data;
72
73 if (bk_node && !backlight_autosave)
74 prop = get_property(bk_node, "bklt", NULL);
75 else
76 prop = NULL;
77 if (prop) {
78 backlight_level = ((*prop)+1) >> 1;
79 if (backlight_level > BACKLIGHT_MAX)
80 backlight_level = BACKLIGHT_MAX;
81 }
82
83#ifdef CONFIG_ADB_PMU
84 if (backlight_autosave) {
85 struct adb_request req;
86 pmu_request(&req, NULL, 2, 0xd9, 0);
87 while (!req.complete)
88 pmu_poll();
89 backlight_level = req.reply[0] >> 4;
90 }
91#endif
92 acquire_console_sem();
93 if (!backlighter->set_enable(1, backlight_level, data))
94 backlight_enabled = 1;
95 release_console_sem();
96
97 printk(KERN_INFO "Registered \"%s\" backlight controller,"
98 "level: %d/15\n", type, backlight_level);
99}
100EXPORT_SYMBOL(register_backlight_controller);
101
102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data)
104{
105 /* We keep the current backlight level (for now) */
106 if (ctrler == backlighter && data == backlighter_data)
107 backlighter = NULL;
108}
109EXPORT_SYMBOL(unregister_backlight_controller);
110
111static int __set_backlight_enable(int enable)
112{
113 int rc;
114
115 if (!backlighter)
116 return -ENODEV;
117 acquire_console_sem();
118 rc = backlighter->set_enable(enable, backlight_level,
119 backlighter_data);
120 if (!rc)
121 backlight_enabled = enable;
122 release_console_sem();
123 return rc;
124}
125int set_backlight_enable(int enable)
126{
127 if (!backlighter)
128 return -ENODEV;
129 backlight_req_enable = enable;
130 schedule_work(&backlight_work);
131 return 0;
132}
133
134EXPORT_SYMBOL(set_backlight_enable);
135
136int get_backlight_enable(void)
137{
138 if (!backlighter)
139 return -ENODEV;
140 return backlight_enabled;
141}
142EXPORT_SYMBOL(get_backlight_enable);
143
144static int __set_backlight_level(int level)
145{
146 int rc = 0;
147
148 if (!backlighter)
149 return -ENODEV;
150 if (level < BACKLIGHT_MIN)
151 level = BACKLIGHT_OFF;
152 if (level > BACKLIGHT_MAX)
153 level = BACKLIGHT_MAX;
154 acquire_console_sem();
155 if (backlight_enabled)
156 rc = backlighter->set_level(level, backlighter_data);
157 if (!rc)
158 backlight_level = level;
159 release_console_sem();
160 if (!rc && !backlight_autosave) {
161 level <<=1;
162 if (level & 0x10)
163 level |= 0x01;
164 // -- todo: save to property "bklt"
165 }
166 return rc;
167}
168int set_backlight_level(int level)
169{
170 if (!backlighter)
171 return -ENODEV;
172 backlight_req_level = level;
173 schedule_work(&backlight_work);
174 return 0;
175}
176
177EXPORT_SYMBOL(set_backlight_level);
178
179int get_backlight_level(void)
180{
181 if (!backlighter)
182 return -ENODEV;
183 return backlight_level;
184}
185EXPORT_SYMBOL(get_backlight_level);
186
187static void backlight_callback(void *dummy)
188{
189 int level, enable;
190
191 do {
192 level = backlight_req_level;
193 enable = backlight_req_enable;
194 mb();
195
196 if (level >= 0)
197 __set_backlight_level(level);
198 if (enable >= 0)
199 __set_backlight_enable(enable);
200 } while(cmpxchg(&backlight_req_level, level, -1) != level ||
201 cmpxchg(&backlight_req_enable, enable, -1) != enable);
202}
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
new file mode 100644
index 000000000000..fb977de6b704
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -0,0 +1,359 @@
1/*
2 * This file contains low-level cache management functions
3 * used for sleep and CPU speed changes on Apple machines.
4 * (In fact the only thing that is Apple-specific is that we assume
5 * that we can read from ROM at physical address 0xfff00000.)
6 *
7 * Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
8 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
17#include <linux/config.h>
18#include <asm/processor.h>
19#include <asm/ppc_asm.h>
20#include <asm/cputable.h>
21
22/*
23 * Flush and disable all data caches (dL1, L2, L3). This is used
24 * when going to sleep, when doing a PMU based cpufreq transition,
25 * or when "offlining" a CPU on SMP machines. This code is over
26 * paranoid, but I've had enough issues with various CPU revs and
27 * bugs that I decided it was worth beeing over cautious
28 */
29
30_GLOBAL(flush_disable_caches)
31#ifndef CONFIG_6xx
32 blr
33#else
34BEGIN_FTR_SECTION
35 b flush_disable_745x
36END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
37BEGIN_FTR_SECTION
38 b flush_disable_75x
39END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
40 b __flush_disable_L1
41
42/* This is the code for G3 and 74[01]0 */
43flush_disable_75x:
44 mflr r10
45
46 /* Turn off EE and DR in MSR */
47 mfmsr r11
48 rlwinm r0,r11,0,~MSR_EE
49 rlwinm r0,r0,0,~MSR_DR
50 sync
51 mtmsr r0
52 isync
53
54 /* Stop DST streams */
55BEGIN_FTR_SECTION
56 DSSALL
57 sync
58END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
59
60 /* Stop DPM */
61 mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
62 rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
63 sync
64 mtspr SPRN_HID0,r4 /* Disable DPM */
65 sync
66
67 /* Disp-flush L1. We have a weird problem here that I never
68 * totally figured out. On 750FX, using the ROM for the flush
69 * results in a non-working flush. We use that workaround for
70 * now until I finally understand what's going on. --BenH
71 */
72
73 /* ROM base by default */
74 lis r4,0xfff0
75 mfpvr r3
76 srwi r3,r3,16
77 cmplwi cr0,r3,0x7000
78 bne+ 1f
79 /* RAM base on 750FX */
80 li r4,0
811: li r4,0x4000
82 mtctr r4
831: lwz r0,0(r4)
84 addi r4,r4,32
85 bdnz 1b
86 sync
87 isync
88
89 /* Disable / invalidate / enable L1 data */
90 mfspr r3,SPRN_HID0
91 rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
92 mtspr SPRN_HID0,r3
93 sync
94 isync
95 ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
96 sync
97 isync
98 mtspr SPRN_HID0,r3
99 xori r3,r3,(HID0_DCI|HID0_ICFI)
100 mtspr SPRN_HID0,r3
101 sync
102
103 /* Get the current enable bit of the L2CR into r4 */
104 mfspr r5,SPRN_L2CR
105 /* Set to data-only (pre-745x bit) */
106 oris r3,r5,L2CR_L2DO@h
107 b 2f
108 /* When disabling L2, code must be in L1 */
109 .balign 32
1101: mtspr SPRN_L2CR,r3
1113: sync
112 isync
113 b 1f
1142: b 3f
1153: sync
116 isync
117 b 1b
1181: /* disp-flush L2. The interesting thing here is that the L2 can be
119 * up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
120 * but that is probbaly fine. We disp-flush over 4Mb to be safe
121 */
122 lis r4,2
123 mtctr r4
124 lis r4,0xfff0
1251: lwz r0,0(r4)
126 addi r4,r4,32
127 bdnz 1b
128 sync
129 isync
130 lis r4,2
131 mtctr r4
132 lis r4,0xfff0
1331: dcbf 0,r4
134 addi r4,r4,32
135 bdnz 1b
136 sync
137 isync
138
139 /* now disable L2 */
140 rlwinm r5,r5,0,~L2CR_L2E
141 b 2f
142 /* When disabling L2, code must be in L1 */
143 .balign 32
1441: mtspr SPRN_L2CR,r5
1453: sync
146 isync
147 b 1f
1482: b 3f
1493: sync
150 isync
151 b 1b
1521: sync
153 isync
154 /* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
155 oris r4,r5,L2CR_L2I@h
156 mtspr SPRN_L2CR,r4
157 sync
158 isync
159
160 /* Wait for the invalidation to complete */
1611: mfspr r3,SPRN_L2CR
162 rlwinm. r0,r3,0,31,31
163 bne 1b
164
165 /* Clear L2I */
166 xoris r4,r4,L2CR_L2I@h
167 sync
168 mtspr SPRN_L2CR,r4
169 sync
170
171 /* now disable the L1 data cache */
172 mfspr r0,SPRN_HID0
173 rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
174 mtspr SPRN_HID0,r0
175 sync
176 isync
177
178 /* Restore HID0[DPM] to whatever it was before */
179 sync
180 mfspr r0,SPRN_HID0
181 rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
182 mtspr SPRN_HID0,r0
183 sync
184
185 /* restore DR and EE */
186 sync
187 mtmsr r11
188 isync
189
190 mtlr r10
191 blr
192
193/* This code is for 745x processors */
194flush_disable_745x:
195 /* Turn off EE and DR in MSR */
196 mfmsr r11
197 rlwinm r0,r11,0,~MSR_EE
198 rlwinm r0,r0,0,~MSR_DR
199 sync
200 mtmsr r0
201 isync
202
203 /* Stop prefetch streams */
204 DSSALL
205 sync
206
207 /* Disable L2 prefetching */
208 mfspr r0,SPRN_MSSCR0
209 rlwinm r0,r0,0,0,29
210 mtspr SPRN_MSSCR0,r0
211 sync
212 isync
213 lis r4,0
214 dcbf 0,r4
215 dcbf 0,r4
216 dcbf 0,r4
217 dcbf 0,r4
218 dcbf 0,r4
219 dcbf 0,r4
220 dcbf 0,r4
221 dcbf 0,r4
222
223 /* Due to a bug with the HW flush on some CPU revs, we occasionally
224 * experience data corruption. I'm adding a displacement flush along
225 * with a dcbf loop over a few Mb to "help". The problem isn't totally
226 * fixed by this in theory, but at least, in practice, I couldn't reproduce
227 * it even with a big hammer...
228 */
229
230 lis r4,0x0002
231 mtctr r4
232 li r4,0
2331:
234 lwz r0,0(r4)
235 addi r4,r4,32 /* Go to start of next cache line */
236 bdnz 1b
237 isync
238
239 /* Now, flush the first 4MB of memory */
240 lis r4,0x0002
241 mtctr r4
242 li r4,0
243 sync
2441:
245 dcbf 0,r4
246 addi r4,r4,32 /* Go to start of next cache line */
247 bdnz 1b
248
249 /* Flush and disable the L1 data cache */
250 mfspr r6,SPRN_LDSTCR
251 lis r3,0xfff0 /* read from ROM for displacement flush */
252 li r4,0xfe /* start with only way 0 unlocked */
253 li r5,128 /* 128 lines in each way */
2541: mtctr r5
255 rlwimi r6,r4,0,24,31
256 mtspr SPRN_LDSTCR,r6
257 sync
258 isync
2592: lwz r0,0(r3) /* touch each cache line */
260 addi r3,r3,32
261 bdnz 2b
262 rlwinm r4,r4,1,24,30 /* move on to the next way */
263 ori r4,r4,1
264 cmpwi r4,0xff /* all done? */
265 bne 1b
266 /* now unlock the L1 data cache */
267 li r4,0
268 rlwimi r6,r4,0,24,31
269 sync
270 mtspr SPRN_LDSTCR,r6
271 sync
272 isync
273
274 /* Flush the L2 cache using the hardware assist */
275 mfspr r3,SPRN_L2CR
276 cmpwi r3,0 /* check if it is enabled first */
277 bge 4f
278 oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
279 b 2f
280 /* When disabling/locking L2, code must be in L1 */
281 .balign 32
2821: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
2833: sync
284 isync
285 b 1f
2862: b 3f
2873: sync
288 isync
289 b 1b
2901: sync
291 isync
292 ori r0,r3,L2CR_L2HWF_745x
293 sync
294 mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
2953: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
296 andi. r0,r0,L2CR_L2HWF_745x
297 bne 3b
298 sync
299 rlwinm r3,r3,0,~L2CR_L2E
300 b 2f
301 /* When disabling L2, code must be in L1 */
302 .balign 32
3031: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3043: sync
305 isync
306 b 1f
3072: b 3f
3083: sync
309 isync
310 b 1b
3111: sync
312 isync
313 oris r4,r3,L2CR_L2I@h
314 mtspr SPRN_L2CR,r4
315 sync
316 isync
3171: mfspr r4,SPRN_L2CR
318 andis. r0,r4,L2CR_L2I@h
319 bne 1b
320 sync
321
322BEGIN_FTR_SECTION
323 /* Flush the L3 cache using the hardware assist */
3244: mfspr r3,SPRN_L3CR
325 cmpwi r3,0 /* check if it is enabled */
326 bge 6f
327 oris r0,r3,L3CR_L3IO@h
328 ori r0,r0,L3CR_L3DO
329 sync
330 mtspr SPRN_L3CR,r0 /* lock the L3 cache */
331 sync
332 isync
333 ori r0,r0,L3CR_L3HWF
334 sync
335 mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
3365: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
337 andi. r0,r0,L3CR_L3HWF
338 bne 5b
339 rlwinm r3,r3,0,~L3CR_L3E
340 sync
341 mtspr SPRN_L3CR,r3 /* disable the L3 cache */
342 sync
343 ori r4,r3,L3CR_L3I
344 mtspr SPRN_L3CR,r4
3451: mfspr r4,SPRN_L3CR
346 andi. r0,r4,L3CR_L3I
347 bne 1b
348 sync
349END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
350
3516: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
352 rlwinm r0,r0,0,~HID0_DCE
353 mtspr SPRN_HID0,r0
354 sync
355 isync
356 mtmsr r11 /* restore DR and EE */
357 isync
358 blr
359#endif /* CONFIG_6xx */
diff --git a/arch/powerpc/platforms/powermac/cpufreq.c b/arch/powerpc/platforms/powermac/cpufreq.c
new file mode 100644
index 000000000000..c47f8b69725c
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cpufreq.c
@@ -0,0 +1,726 @@
1/*
2 * arch/ppc/platforms/pmac_cpufreq.c
3 *
4 * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
5 * Copyright (C) 2004 John Steele Scott <toojays@toojays.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * TODO: Need a big cleanup here. Basically, we need to have different
12 * cpufreq_driver structures for the different type of HW instead of the
13 * current mess. We also need to better deal with the detection of the
14 * type of machine.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/kernel.h>
23#include <linux/delay.h>
24#include <linux/sched.h>
25#include <linux/adb.h>
26#include <linux/pmu.h>
27#include <linux/slab.h>
28#include <linux/cpufreq.h>
29#include <linux/init.h>
30#include <linux/sysdev.h>
31#include <linux/i2c.h>
32#include <linux/hardirq.h>
33#include <asm/prom.h>
34#include <asm/machdep.h>
35#include <asm/irq.h>
36#include <asm/pmac_feature.h>
37#include <asm/mmu_context.h>
38#include <asm/sections.h>
39#include <asm/cputable.h>
40#include <asm/time.h>
41#include <asm/system.h>
42#include <asm/mpic.h>
43#include <asm/keylargo.h>
44
45/* WARNING !!! This will cause calibrate_delay() to be called,
46 * but this is an __init function ! So you MUST go edit
47 * init/main.c to make it non-init before enabling DEBUG_FREQ
48 */
49#undef DEBUG_FREQ
50
51/*
52 * There is a problem with the core cpufreq code on SMP kernels,
53 * it won't recalculate the Bogomips properly
54 */
55#ifdef CONFIG_SMP
56#warning "WARNING, CPUFREQ not recommended on SMP kernels"
57#endif
58
59extern void low_choose_7447a_dfs(int dfs);
60extern void low_choose_750fx_pll(int pll);
61extern void low_sleep_handler(void);
62
63/*
64 * Currently, PowerMac cpufreq supports only high & low frequencies
65 * that are set by the firmware
66 */
67static unsigned int low_freq;
68static unsigned int hi_freq;
69static unsigned int cur_freq;
70static unsigned int sleep_freq;
71
72/*
 73 * Different models use different mechanisms to switch the frequency
74 */
75static int (*set_speed_proc)(int low_speed);
76static unsigned int (*get_speed_proc)(void);
77
78/*
79 * Some definitions used by the various speedprocs
80 */
81static u32 voltage_gpio;
82static u32 frequency_gpio;
83static u32 slew_done_gpio;
84static int no_schedule;
85static int has_cpu_l2lve;
86static int is_pmu_based;
87
88/* There are only two frequency states for each processor. Values
89 * are in kHz for the time being.
90 */
91#define CPUFREQ_HIGH 0
92#define CPUFREQ_LOW 1
93
94static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
95 {CPUFREQ_HIGH, 0},
96 {CPUFREQ_LOW, 0},
97 {0, CPUFREQ_TABLE_END},
98};
99
100static struct freq_attr* pmac_cpu_freqs_attr[] = {
101 &cpufreq_freq_attr_scaling_available_freqs,
102 NULL,
103};
104
105static inline void local_delay(unsigned long ms)
106{
107 if (no_schedule)
108 mdelay(ms);
109 else
110 msleep(ms);
111}
112
113#ifdef DEBUG_FREQ
114static inline void debug_calc_bogomips(void)
115{
116 /* This will cause a recalc of bogomips and display the
117 * result. We backup/restore the value to avoid affecting the
118 * core cpufreq framework's own calculation.
119 */
120 extern void calibrate_delay(void);
121
122 unsigned long save_lpj = loops_per_jiffy;
123 calibrate_delay();
124 loops_per_jiffy = save_lpj;
125}
126#endif /* DEBUG_FREQ */
127
128/* Switch CPU speed under 750FX CPU control
129 */
130static int cpu_750fx_cpu_speed(int low_speed)
131{
132 u32 hid2;
133
134 if (low_speed == 0) {
135 /* ramping up, set voltage first */
136 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
137 /* Make sure we sleep for at least 1ms */
138 local_delay(10);
139
140 /* tweak L2 for high voltage */
141 if (has_cpu_l2lve) {
142 hid2 = mfspr(SPRN_HID2);
143 hid2 &= ~0x2000;
144 mtspr(SPRN_HID2, hid2);
145 }
146 }
147#ifdef CONFIG_6xx
148 low_choose_750fx_pll(low_speed);
149#endif
150 if (low_speed == 1) {
151 /* tweak L2 for low voltage */
152 if (has_cpu_l2lve) {
153 hid2 = mfspr(SPRN_HID2);
154 hid2 |= 0x2000;
155 mtspr(SPRN_HID2, hid2);
156 }
157
158 /* ramping down, set voltage last */
159 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
160 local_delay(10);
161 }
162
163 return 0;
164}
165
166static unsigned int cpu_750fx_get_cpu_speed(void)
167{
168 if (mfspr(SPRN_HID1) & HID1_PS)
169 return low_freq;
170 else
171 return hi_freq;
172}
173
174/* Switch CPU speed using DFS */
175static int dfs_set_cpu_speed(int low_speed)
176{
177 if (low_speed == 0) {
178 /* ramping up, set voltage first */
179 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
180 /* Make sure we sleep for at least 1ms */
181 local_delay(1);
182 }
183
184 /* set frequency */
185#ifdef CONFIG_6xx
186 low_choose_7447a_dfs(low_speed);
187#endif
188 udelay(100);
189
190 if (low_speed == 1) {
191 /* ramping down, set voltage last */
192 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
193 local_delay(1);
194 }
195
196 return 0;
197}
198
199static unsigned int dfs_get_cpu_speed(void)
200{
201 if (mfspr(SPRN_HID1) & HID1_DFS)
202 return low_freq;
203 else
204 return hi_freq;
205}
206
207
208/* Switch CPU speed using slewing GPIOs
209 */
210static int gpios_set_cpu_speed(int low_speed)
211{
212 int gpio, timeout = 0;
213
214 /* If ramping up, set voltage first */
215 if (low_speed == 0) {
216 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x05);
217 /* Delay is way too big but it's ok, we schedule */
218 local_delay(10);
219 }
220
221 /* Set frequency */
222 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
223 if (low_speed == ((gpio & 0x01) == 0))
224 goto skip;
225
226 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, frequency_gpio,
227 low_speed ? 0x04 : 0x05);
228 udelay(200);
229 do {
230 if (++timeout > 100)
231 break;
232 local_delay(1);
233 gpio = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, slew_done_gpio, 0);
234 } while((gpio & 0x02) == 0);
235 skip:
236 /* If ramping down, set voltage last */
237 if (low_speed == 1) {
238 pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, voltage_gpio, 0x04);
239 /* Delay is way too big but it's ok, we schedule */
240 local_delay(10);
241 }
242
243#ifdef DEBUG_FREQ
244 debug_calc_bogomips();
245#endif
246
247 return 0;
248}
249
250/* Switch CPU speed under PMU control
251 */
252static int pmu_set_cpu_speed(int low_speed)
253{
254 struct adb_request req;
255 unsigned long save_l2cr;
256 unsigned long save_l3cr;
257 unsigned int pic_prio;
258 unsigned long flags;
259
260 preempt_disable();
261
262#ifdef DEBUG_FREQ
263 printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
264#endif
265 pmu_suspend();
266
267 /* Disable all interrupt sources on openpic */
268 pic_prio = mpic_cpu_get_priority();
269 mpic_cpu_set_priority(0xf);
270
271 /* Make sure the decrementer won't interrupt us */
272 asm volatile("mtdec %0" : : "r" (0x7fffffff));
273 /* Make sure any pending DEC interrupt occuring while we did
274 * the above didn't re-enable the DEC */
275 mb();
276 asm volatile("mtdec %0" : : "r" (0x7fffffff));
277
278 /* We can now disable MSR_EE */
279 local_irq_save(flags);
280
281 /* Giveup the FPU & vec */
282 enable_kernel_fp();
283
284#ifdef CONFIG_ALTIVEC
285 if (cpu_has_feature(CPU_FTR_ALTIVEC))
286 enable_kernel_altivec();
287#endif /* CONFIG_ALTIVEC */
288
289 /* Save & disable L2 and L3 caches */
290 save_l3cr = _get_L3CR(); /* (returns -1 if not available) */
291 save_l2cr = _get_L2CR(); /* (returns -1 if not available) */
292
293 /* Send the new speed command. My assumption is that this command
294 * will cause PLL_CFG[0..3] to be changed next time CPU goes to sleep
295 */
296 pmu_request(&req, NULL, 6, PMU_CPU_SPEED, 'W', 'O', 'O', 'F', low_speed);
297 while (!req.complete)
298 pmu_poll();
299
300 /* Prepare the northbridge for the speed transition */
301 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,1);
302
303 /* Call low level code to backup CPU state and recover from
304 * hardware reset
305 */
306 low_sleep_handler();
307
308 /* Restore the northbridge */
309 pmac_call_feature(PMAC_FTR_SLEEP_STATE,NULL,1,0);
310
311 /* Restore L2 cache */
312 if (save_l2cr != 0xffffffff && (save_l2cr & L2CR_L2E) != 0)
313 _set_L2CR(save_l2cr);
314 /* Restore L3 cache */
315 if (save_l3cr != 0xffffffff && (save_l3cr & L3CR_L3E) != 0)
316 _set_L3CR(save_l3cr);
317
318 /* Restore userland MMU context */
319 set_context(current->active_mm->context, current->active_mm->pgd);
320
321#ifdef DEBUG_FREQ
322 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
323#endif
324
325 /* Restore low level PMU operations */
326 pmu_unlock();
327
328 /* Restore decrementer */
329 wakeup_decrementer();
330
331 /* Restore interrupts */
332 mpic_cpu_set_priority(pic_prio);
333
334 /* Let interrupts flow again ... */
335 local_irq_restore(flags);
336
337#ifdef DEBUG_FREQ
338 debug_calc_bogomips();
339#endif
340
341 pmu_resume();
342
343 preempt_enable();
344
345 return 0;
346}
347
348static int do_set_cpu_speed(int speed_mode, int notify)
349{
350 struct cpufreq_freqs freqs;
351 unsigned long l3cr;
352 static unsigned long prev_l3cr;
353
354 freqs.old = cur_freq;
355 freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
356 freqs.cpu = smp_processor_id();
357
358 if (freqs.old == freqs.new)
359 return 0;
360
361 if (notify)
362 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
363 if (speed_mode == CPUFREQ_LOW &&
364 cpu_has_feature(CPU_FTR_L3CR)) {
365 l3cr = _get_L3CR();
366 if (l3cr & L3CR_L3E) {
367 prev_l3cr = l3cr;
368 _set_L3CR(0);
369 }
370 }
371 set_speed_proc(speed_mode == CPUFREQ_LOW);
372 if (speed_mode == CPUFREQ_HIGH &&
373 cpu_has_feature(CPU_FTR_L3CR)) {
374 l3cr = _get_L3CR();
375 if ((prev_l3cr & L3CR_L3E) && l3cr != prev_l3cr)
376 _set_L3CR(prev_l3cr);
377 }
378 if (notify)
379 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
380 cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
381
382 return 0;
383}
384
385static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
386{
387 return cur_freq;
388}
389
390static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
391{
392 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
393}
394
395static int pmac_cpufreq_target( struct cpufreq_policy *policy,
396 unsigned int target_freq,
397 unsigned int relation)
398{
399 unsigned int newstate = 0;
400
401 if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
402 target_freq, relation, &newstate))
403 return -EINVAL;
404
405 return do_set_cpu_speed(newstate, 1);
406}
407
408unsigned int pmac_get_one_cpufreq(int i)
409{
410 /* Supports only one CPU for now */
411 return (i == 0) ? cur_freq : 0;
412}
413
414static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
415{
416 if (policy->cpu != 0)
417 return -ENODEV;
418
419 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
420 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
421 policy->cur = cur_freq;
422
423 cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
424 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
425}
426
427static u32 read_gpio(struct device_node *np)
428{
429 u32 *reg = (u32 *)get_property(np, "reg", NULL);
430 u32 offset;
431
432 if (reg == NULL)
433 return 0;
434 /* That works for all keylargos but shall be fixed properly
435 * some day... The problem is that it seems we can't rely
436 * on the "reg" property of the GPIO nodes, they are either
437 * relative to the base of KeyLargo or to the base of the
438 * GPIO space, and the device-tree doesn't help.
439 */
440 offset = *reg;
441 if (offset < KEYLARGO_GPIO_LEVELS0)
442 offset += KEYLARGO_GPIO_LEVELS0;
443 return offset;
444}
445
446static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
447{
448 /* Ok, this could be made a bit smarter, but let's be robust for now. We
449 * always force a speed change to high speed before sleep, to make sure
450 * we have appropriate voltage and/or bus speed for the wakeup process,
451 * and to make sure our loops_per_jiffies are "good enough", that is will
452 * not cause too short delays if we sleep in low speed and wake in high
453 * speed..
454 */
455 no_schedule = 1;
456 sleep_freq = cur_freq;
457 if (cur_freq == low_freq && !is_pmu_based)
458 do_set_cpu_speed(CPUFREQ_HIGH, 0);
459 return 0;
460}
461
462static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
463{
464 /* If we resume, first check if we have a get() function */
465 if (get_speed_proc)
466 cur_freq = get_speed_proc();
467 else
468 cur_freq = 0;
469
470 /* We don't, hrm... we don't really know our speed here, best
471 * is that we force a switch to whatever it was, which is
472 * probably high speed due to our suspend() routine
473 */
474 do_set_cpu_speed(sleep_freq == low_freq ?
475 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
476
477 no_schedule = 0;
478 return 0;
479}
480
481static struct cpufreq_driver pmac_cpufreq_driver = {
482 .verify = pmac_cpufreq_verify,
483 .target = pmac_cpufreq_target,
484 .get = pmac_cpufreq_get_speed,
485 .init = pmac_cpufreq_cpu_init,
486 .suspend = pmac_cpufreq_suspend,
487 .resume = pmac_cpufreq_resume,
488 .flags = CPUFREQ_PM_NO_WARN,
489 .attr = pmac_cpu_freqs_attr,
490 .name = "powermac",
491 .owner = THIS_MODULE,
492};
493
494
495static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
496{
497 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
498 "voltage-gpio");
499 struct device_node *freq_gpio_np = of_find_node_by_name(NULL,
500 "frequency-gpio");
501 struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL,
502 "slewing-done");
503 u32 *value;
504
505 /*
506 * Check to see if it's GPIO driven or PMU only
507 *
508 * The way we extract the GPIO address is slightly hackish, but it
509 * works well enough for now. We need to abstract the whole GPIO
510 * stuff sooner or later anyway
511 */
512
513 if (volt_gpio_np)
514 voltage_gpio = read_gpio(volt_gpio_np);
515 if (freq_gpio_np)
516 frequency_gpio = read_gpio(freq_gpio_np);
517 if (slew_done_gpio_np)
518 slew_done_gpio = read_gpio(slew_done_gpio_np);
519
520 /* If we use the frequency GPIOs, calculate the min/max speeds based
521 * on the bus frequencies
522 */
523 if (frequency_gpio && slew_done_gpio) {
524 int lenp, rc;
525 u32 *freqs, *ratio;
526
527 freqs = (u32 *)get_property(cpunode, "bus-frequencies", &lenp);
528 lenp /= sizeof(u32);
529 if (freqs == NULL || lenp != 2) {
530 printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
531 return 1;
532 }
533 ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL);
534 if (ratio == NULL) {
535 printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
536 return 1;
537 }
538
539 /* Get the min/max bus frequencies */
540 low_freq = min(freqs[0], freqs[1]);
541 hi_freq = max(freqs[0], freqs[1]);
542
543 /* Grrrr.. It _seems_ that the device-tree is lying on the low bus
544 * frequency, it claims it to be around 84Mhz on some models while
545 * it appears to be approx. 101Mhz on all. Let's hack around here...
546 * fortunately, we don't need to be too precise
547 */
548 if (low_freq < 98000000)
549 low_freq = 101000000;
550
551 /* Convert those to CPU core clocks */
552 low_freq = (low_freq * (*ratio)) / 2000;
553 hi_freq = (hi_freq * (*ratio)) / 2000;
554
555 /* Now we get the frequencies, we read the GPIO to see what is out current
556 * speed
557 */
558 rc = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, frequency_gpio, 0);
559 cur_freq = (rc & 0x01) ? hi_freq : low_freq;
560
561 set_speed_proc = gpios_set_cpu_speed;
562 return 1;
563 }
564
565 /* If we use the PMU, look for the min & max frequencies in the
566 * device-tree
567 */
568 value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL);
569 if (!value)
570 return 1;
571 low_freq = (*value) / 1000;
572 /* The PowerBook G4 12" (PowerBook6,1) has an error in the device-tree
573 * here */
574 if (low_freq < 100000)
575 low_freq *= 10;
576
577 value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL);
578 if (!value)
579 return 1;
580 hi_freq = (*value) / 1000;
581 set_speed_proc = pmu_set_cpu_speed;
582 is_pmu_based = 1;
583
584 return 0;
585}
586
587static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
588{
589 struct device_node *volt_gpio_np;
590
591 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
592 return 1;
593
594 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
595 if (volt_gpio_np)
596 voltage_gpio = read_gpio(volt_gpio_np);
597 if (!voltage_gpio){
598 printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
599 return 1;
600 }
601
602 /* OF only reports the high frequency */
603 hi_freq = cur_freq;
604 low_freq = cur_freq/2;
605
606 /* Read actual frequency from CPU */
607 cur_freq = dfs_get_cpu_speed();
608 set_speed_proc = dfs_set_cpu_speed;
609 get_speed_proc = dfs_get_cpu_speed;
610
611 return 0;
612}
613
614static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
615{
616 struct device_node *volt_gpio_np;
617 u32 pvr, *value;
618
619 if (get_property(cpunode, "dynamic-power-step", NULL) == NULL)
620 return 1;
621
622 hi_freq = cur_freq;
623 value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL);
624 if (!value)
625 return 1;
626 low_freq = (*value) / 1000;
627
628 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
629 if (volt_gpio_np)
630 voltage_gpio = read_gpio(volt_gpio_np);
631
632 pvr = mfspr(SPRN_PVR);
633 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
634
635 set_speed_proc = cpu_750fx_cpu_speed;
636 get_speed_proc = cpu_750fx_get_cpu_speed;
637 cur_freq = cpu_750fx_get_cpu_speed();
638
639 return 0;
640}
641
642/* Currently, we support the following machines:
643 *
644 * - Titanium PowerBook 1Ghz (PMU based, 667Mhz & 1Ghz)
645 * - Titanium PowerBook 800 (PMU based, 667Mhz & 800Mhz)
646 * - Titanium PowerBook 400 (PMU based, 300Mhz & 400Mhz)
647 * - Titanium PowerBook 500 (PMU based, 300Mhz & 500Mhz)
648 * - iBook2 500/600 (PMU based, 400Mhz & 500/600Mhz)
649 * - iBook2 700 (CPU based, 400Mhz & 700Mhz, support low voltage)
650 * - Recent MacRISC3 laptops
651 * - All new machines with 7447A CPUs
652 */
653static int __init pmac_cpufreq_setup(void)
654{
655 struct device_node *cpunode;
656 u32 *value;
657
658 if (strstr(cmd_line, "nocpufreq"))
659 return 0;
660
661 /* Assume only one CPU */
662 cpunode = find_type_devices("cpu");
663 if (!cpunode)
664 goto out;
665
666 /* Get current cpu clock freq */
667 value = (u32 *)get_property(cpunode, "clock-frequency", NULL);
668 if (!value)
669 goto out;
670 cur_freq = (*value) / 1000;
671
672 /* Check for 7447A based MacRISC3 */
673 if (machine_is_compatible("MacRISC3") &&
674 get_property(cpunode, "dynamic-power-step", NULL) &&
675 PVR_VER(mfspr(SPRN_PVR)) == 0x8003) {
676 pmac_cpufreq_init_7447A(cpunode);
677 /* Check for other MacRISC3 machines */
678 } else if (machine_is_compatible("PowerBook3,4") ||
679 machine_is_compatible("PowerBook3,5") ||
680 machine_is_compatible("MacRISC3")) {
681 pmac_cpufreq_init_MacRISC3(cpunode);
682 /* Else check for iBook2 500/600 */
683 } else if (machine_is_compatible("PowerBook4,1")) {
684 hi_freq = cur_freq;
685 low_freq = 400000;
686 set_speed_proc = pmu_set_cpu_speed;
687 is_pmu_based = 1;
688 }
689 /* Else check for TiPb 550 */
690 else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) {
691 hi_freq = cur_freq;
692 low_freq = 500000;
693 set_speed_proc = pmu_set_cpu_speed;
694 is_pmu_based = 1;
695 }
696 /* Else check for TiPb 400 & 500 */
697 else if (machine_is_compatible("PowerBook3,2")) {
698 /* We only know about the 400 MHz and the 500Mhz model
699 * they both have 300 MHz as low frequency
700 */
701 if (cur_freq < 350000 || cur_freq > 550000)
702 goto out;
703 hi_freq = cur_freq;
704 low_freq = 300000;
705 set_speed_proc = pmu_set_cpu_speed;
706 is_pmu_based = 1;
707 }
708 /* Else check for 750FX */
709 else if (PVR_VER(mfspr(SPRN_PVR)) == 0x7000)
710 pmac_cpufreq_init_750FX(cpunode);
711out:
712 if (set_speed_proc == NULL)
713 return -ENODEV;
714
715 pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
716 pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
717
718 printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
719 printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
720 low_freq/1000, hi_freq/1000, cur_freq/1000);
721
722 return cpufreq_register_driver(&pmac_cpufreq_driver);
723}
724
725module_init(pmac_cpufreq_setup);
726
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
new file mode 100644
index 000000000000..10f1d942c661
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -0,0 +1,3063 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/ohare.h>
35#include <asm/heathrow.h>
36#include <asm/keylargo.h>
37#include <asm/uninorth.h>
38#include <asm/io.h>
39#include <asm/prom.h>
40#include <asm/machdep.h>
41#include <asm/pmac_feature.h>
42#include <asm/dbdma.h>
43#include <asm/pci-bridge.h>
44#include <asm/pmac_low_i2c.h>
45
46#undef DEBUG_FEATURE
47
48#ifdef DEBUG_FEATURE
49#define DBG(fmt...) printk(KERN_DEBUG fmt)
50#else
51#define DBG(fmt...)
52#endif
53
54#ifdef CONFIG_6xx
55extern int powersave_lowspeed;
56#endif
57
58extern int powersave_nap;
59extern struct device_node *k2_skiplist[2];
60
61
62/*
63 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking
65 */
66static DEFINE_SPINLOCK(feature_lock);
67
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
70
71
72/*
73 * Instance of some macio stuffs
74 */
75struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76
77struct macio_chip *macio_find(struct device_node *child, int type)
78{
79 while(child) {
80 int i;
81
82 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
83 if (child == macio_chips[i].of_node &&
84 (!type || macio_chips[i].type == type))
85 return &macio_chips[i];
86 child = child->parent;
87 }
88 return NULL;
89}
90EXPORT_SYMBOL_GPL(macio_find);
91
92static const char *macio_names[] =
93{
94 "Unknown",
95 "Grand Central",
96 "OHare",
97 "OHareII",
98 "Heathrow",
99 "Gatwick",
100 "Paddington",
101 "Keylargo",
102 "Pangea",
103 "Intrepid",
104 "K2"
105};
106
107
108
109/*
110 * Uninorth reg. access. Note that Uni-N regs are big endian
111 */
112
113#define UN_REG(r) (uninorth_base + ((r) >> 2))
114#define UN_IN(r) (in_be32(UN_REG(r)))
115#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118
119static struct device_node *uninorth_node;
120static u32 __iomem *uninorth_base;
121static u32 uninorth_rev;
122static int uninorth_u3;
123static void __iomem *u3_ht;
124
125/*
126 * For each motherboard family, we have a table of functions pointers
127 * that handle the various features.
128 */
129
130typedef long (*feature_call)(struct device_node *node, long param, long value);
131
132struct feature_table_entry {
133 unsigned int selector;
134 feature_call function;
135};
136
137struct pmac_mb_def
138{
139 const char* model_string;
140 const char* model_name;
141 int model_id;
142 struct feature_table_entry* features;
143 unsigned long board_flags;
144};
145static struct pmac_mb_def pmac_mb;
146
147/*
148 * Here are the chip specific feature functions
149 */
150
151static inline int simple_feature_tweak(struct device_node *node, int type,
152 int reg, u32 mask, int value)
153{
154 struct macio_chip* macio;
155 unsigned long flags;
156
157 macio = macio_find(node, type);
158 if (!macio)
159 return -ENODEV;
160 LOCK(flags);
161 if (value)
162 MACIO_BIS(reg, mask);
163 else
164 MACIO_BIC(reg, mask);
165 (void)MACIO_IN32(reg);
166 UNLOCK(flags);
167
168 return 0;
169}
170
171#ifndef CONFIG_POWER4
172
173static long ohare_htw_scc_enable(struct device_node *node, long param,
174 long value)
175{
176 struct macio_chip* macio;
177 unsigned long chan_mask;
178 unsigned long fcr;
179 unsigned long flags;
180 int htw, trans;
181 unsigned long rmask;
182
183 macio = macio_find(node, 0);
184 if (!macio)
185 return -ENODEV;
186 if (!strcmp(node->name, "ch-a"))
187 chan_mask = MACIO_FLAG_SCCA_ON;
188 else if (!strcmp(node->name, "ch-b"))
189 chan_mask = MACIO_FLAG_SCCB_ON;
190 else
191 return -ENODEV;
192
193 htw = (macio->type == macio_heathrow || macio->type == macio_paddington
194 || macio->type == macio_gatwick);
195 /* On these machines, the HRW_SCC_TRANS_EN_N bit mustn't be touched */
196 trans = (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
197 pmac_mb.model_id != PMAC_TYPE_YIKES);
198 if (value) {
199#ifdef CONFIG_ADB_PMU
200 if ((param & 0xfff) == PMAC_SCC_IRDA)
201 pmu_enable_irled(1);
202#endif /* CONFIG_ADB_PMU */
203 LOCK(flags);
204 fcr = MACIO_IN32(OHARE_FCR);
205 /* Check if scc cell need enabling */
206 if (!(fcr & OH_SCC_ENABLE)) {
207 fcr |= OH_SCC_ENABLE;
208 if (htw) {
209 /* Side effect: this will also power up the
210 * modem, but it's too messy to figure out on which
211 * ports this controls the tranceiver and on which
212 * it controls the modem
213 */
214 if (trans)
215 fcr &= ~HRW_SCC_TRANS_EN_N;
216 MACIO_OUT32(OHARE_FCR, fcr);
217 fcr |= (rmask = HRW_RESET_SCC);
218 MACIO_OUT32(OHARE_FCR, fcr);
219 } else {
220 fcr |= (rmask = OH_SCC_RESET);
221 MACIO_OUT32(OHARE_FCR, fcr);
222 }
223 UNLOCK(flags);
224 (void)MACIO_IN32(OHARE_FCR);
225 mdelay(15);
226 LOCK(flags);
227 fcr &= ~rmask;
228 MACIO_OUT32(OHARE_FCR, fcr);
229 }
230 if (chan_mask & MACIO_FLAG_SCCA_ON)
231 fcr |= OH_SCCA_IO;
232 if (chan_mask & MACIO_FLAG_SCCB_ON)
233 fcr |= OH_SCCB_IO;
234 MACIO_OUT32(OHARE_FCR, fcr);
235 macio->flags |= chan_mask;
236 UNLOCK(flags);
237 if (param & PMAC_SCC_FLAG_XMON)
238 macio->flags |= MACIO_FLAG_SCC_LOCKED;
239 } else {
240 if (macio->flags & MACIO_FLAG_SCC_LOCKED)
241 return -EPERM;
242 LOCK(flags);
243 fcr = MACIO_IN32(OHARE_FCR);
244 if (chan_mask & MACIO_FLAG_SCCA_ON)
245 fcr &= ~OH_SCCA_IO;
246 if (chan_mask & MACIO_FLAG_SCCB_ON)
247 fcr &= ~OH_SCCB_IO;
248 MACIO_OUT32(OHARE_FCR, fcr);
249 if ((fcr & (OH_SCCA_IO | OH_SCCB_IO)) == 0) {
250 fcr &= ~OH_SCC_ENABLE;
251 if (htw && trans)
252 fcr |= HRW_SCC_TRANS_EN_N;
253 MACIO_OUT32(OHARE_FCR, fcr);
254 }
255 macio->flags &= ~(chan_mask);
256 UNLOCK(flags);
257 mdelay(10);
258#ifdef CONFIG_ADB_PMU
259 if ((param & 0xfff) == PMAC_SCC_IRDA)
260 pmu_enable_irled(0);
261#endif /* CONFIG_ADB_PMU */
262 }
263 return 0;
264}
265
266static long ohare_floppy_enable(struct device_node *node, long param,
267 long value)
268{
269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271}
272
273static long ohare_mesh_enable(struct device_node *node, long param, long value)
274{
275 return simple_feature_tweak(node, macio_ohare,
276 OHARE_FCR, OH_MESH_ENABLE, value);
277}
278
279static long ohare_ide_enable(struct device_node *node, long param, long value)
280{
281 switch(param) {
282 case 0:
283 /* For some reason, setting the bit in set_initial_features()
284 * doesn't stick. I'm still investigating... --BenH.
285 */
286 if (value)
287 simple_feature_tweak(node, macio_ohare,
288 OHARE_FCR, OH_IOBUS_ENABLE, 1);
289 return simple_feature_tweak(node, macio_ohare,
290 OHARE_FCR, OH_IDE0_ENABLE, value);
291 case 1:
292 return simple_feature_tweak(node, macio_ohare,
293 OHARE_FCR, OH_BAY_IDE_ENABLE, value);
294 default:
295 return -ENODEV;
296 }
297}
298
299static long ohare_ide_reset(struct device_node *node, long param, long value)
300{
301 switch(param) {
302 case 0:
303 return simple_feature_tweak(node, macio_ohare,
304 OHARE_FCR, OH_IDE0_RESET_N, !value);
305 case 1:
306 return simple_feature_tweak(node, macio_ohare,
307 OHARE_FCR, OH_IDE1_RESET_N, !value);
308 default:
309 return -ENODEV;
310 }
311}
312
313static long ohare_sleep_state(struct device_node *node, long param, long value)
314{
315 struct macio_chip* macio = &macio_chips[0];
316
317 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
318 return -EPERM;
319 if (value == 1) {
320 MACIO_BIC(OHARE_FCR, OH_IOBUS_ENABLE);
321 } else if (value == 0) {
322 MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
323 }
324
325 return 0;
326}
327
328static long heathrow_modem_enable(struct device_node *node, long param,
329 long value)
330{
331 struct macio_chip* macio;
332 u8 gpio;
333 unsigned long flags;
334
335 macio = macio_find(node, macio_unknown);
336 if (!macio)
337 return -ENODEV;
338 gpio = MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1;
339 if (!value) {
340 LOCK(flags);
341 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
342 UNLOCK(flags);
343 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
344 mdelay(250);
345 }
346 if (pmac_mb.model_id != PMAC_TYPE_YOSEMITE &&
347 pmac_mb.model_id != PMAC_TYPE_YIKES) {
348 LOCK(flags);
349 if (value)
350 MACIO_BIC(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
351 else
352 MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
353 UNLOCK(flags);
354 (void)MACIO_IN32(HEATHROW_FCR);
355 mdelay(250);
356 }
357 if (value) {
358 LOCK(flags);
359 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
360 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
361 UNLOCK(flags); mdelay(250); LOCK(flags);
362 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio);
363 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
364 UNLOCK(flags); mdelay(250); LOCK(flags);
365 MACIO_OUT8(HRW_GPIO_MODEM_RESET, gpio | 1);
366 (void)MACIO_IN8(HRW_GPIO_MODEM_RESET);
367 UNLOCK(flags); mdelay(250);
368 }
369 return 0;
370}
371
372static long heathrow_floppy_enable(struct device_node *node, long param,
373 long value)
374{
375 return simple_feature_tweak(node, macio_unknown,
376 HEATHROW_FCR,
377 HRW_SWIM_ENABLE|HRW_BAY_FLOPPY_ENABLE,
378 value);
379}
380
381static long heathrow_mesh_enable(struct device_node *node, long param,
382 long value)
383{
384 struct macio_chip* macio;
385 unsigned long flags;
386
387 macio = macio_find(node, macio_unknown);
388 if (!macio)
389 return -ENODEV;
390 LOCK(flags);
391 /* Set clear mesh cell enable */
392 if (value)
393 MACIO_BIS(HEATHROW_FCR, HRW_MESH_ENABLE);
394 else
395 MACIO_BIC(HEATHROW_FCR, HRW_MESH_ENABLE);
396 (void)MACIO_IN32(HEATHROW_FCR);
397 udelay(10);
398 /* Set/Clear termination power */
399 if (value)
400 MACIO_BIC(HEATHROW_MBCR, 0x04000000);
401 else
402 MACIO_BIS(HEATHROW_MBCR, 0x04000000);
403 (void)MACIO_IN32(HEATHROW_MBCR);
404 udelay(10);
405 UNLOCK(flags);
406
407 return 0;
408}
409
410static long heathrow_ide_enable(struct device_node *node, long param,
411 long value)
412{
413 switch(param) {
414 case 0:
415 return simple_feature_tweak(node, macio_unknown,
416 HEATHROW_FCR, HRW_IDE0_ENABLE, value);
417 case 1:
418 return simple_feature_tweak(node, macio_unknown,
419 HEATHROW_FCR, HRW_BAY_IDE_ENABLE, value);
420 default:
421 return -ENODEV;
422 }
423}
424
425static long heathrow_ide_reset(struct device_node *node, long param,
426 long value)
427{
428 switch(param) {
429 case 0:
430 return simple_feature_tweak(node, macio_unknown,
431 HEATHROW_FCR, HRW_IDE0_RESET_N, !value);
432 case 1:
433 return simple_feature_tweak(node, macio_unknown,
434 HEATHROW_FCR, HRW_IDE1_RESET_N, !value);
435 default:
436 return -ENODEV;
437 }
438}
439
/* Power up (with a reset pulse) or power down the Heathrow BMAC
 * ethernet cell.  On enable: raise IO enable + reset, wait 10ms,
 * then release reset and wait again.  On disable: just drop the IO
 * enable bit.
 */
static long heathrow_bmac_enable(struct device_node *node, long param,
				 long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (value) {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
		MACIO_BIS(HEATHROW_FCR, HRW_BMAC_RESET);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);	/* flush posted writes */
		mdelay(10);
		LOCK(flags);
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_RESET);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);
		mdelay(10);
	} else {
		LOCK(flags);
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE);
		UNLOCK(flags);
	}
	return 0;
}
468
/* Switch the Heathrow sound cell on or off.  The power bit is
 * active-low (HRW_SOUND_POWER_N); when enabling, the clock is started
 * before power is applied, and the reverse order is used on disable.
 */
static long heathrow_sound_enable(struct device_node *node, long param,
				  long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	/* B&W G3 and Yikes don't support that properly (the
	 * sound appear to never come back after being shut down).
	 */
	if (pmac_mb.model_id == PMAC_TYPE_YOSEMITE ||
	    pmac_mb.model_id == PMAC_TYPE_YIKES)
		return 0;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	if (value) {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
		UNLOCK(flags);
		(void)MACIO_IN32(HEATHROW_FCR);	/* flush posted writes */
	} else {
		LOCK(flags);
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		UNLOCK(flags);
	}
	return 0;
}
499
/* Sleep/wakeup save area: register state captured on the way down and
 * written back on wakeup (see heathrow_sleep()/heathrow_wakeup() and
 * the dbdma_save()/dbdma_restore() helpers below; some fields are
 * presumably used by keylargo-era sleep code outside this chunk). */
static u32 save_fcr[6];			/* feature control registers */
static u32 save_mbcr;			/* media-bay control register */
static u32 save_gpio_levels[2];
static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
static u32 save_unin_clock_ctl;
static struct dbdma_regs save_dbdma[13];	/* primary macio channels */
static struct dbdma_regs save_alt_dbdma[13];	/* secondary (gatwick) channels */
508
/* Capture the programmable configuration of a macio chip's 13 DBDMA
 * channels into @save so it can be re-applied after sleep. */
static void dbdma_save(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Save state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		/* Channel i's registers sit at offset 0x8000 + i*0x100;
		 * the >>2 converts the byte offset to macio->base's
		 * word-sized pointer arithmetic (presumably a u32 *). */
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		save[i].cmdptr_hi = in_le32(&chan->cmdptr_hi);
		save[i].cmdptr = in_le32(&chan->cmdptr);
		save[i].intr_sel = in_le32(&chan->intr_sel);
		save[i].br_sel = in_le32(&chan->br_sel);
		save[i].wait_sel = in_le32(&chan->wait_sel);
	}
}
524
/* Re-program the 13 DBDMA channels from state previously captured by
 * dbdma_save().  Each channel is stopped (all control bits cleared via
 * the mask in the high half-word) and polled until no longer ACTIVE
 * before its configuration registers are written back. */
static void dbdma_restore(struct macio_chip *macio, struct dbdma_regs *save)
{
	int i;

	/* Restore state & config of DBDMA channels */
	for (i = 0; i < 13; i++) {
		volatile struct dbdma_regs __iomem * chan = (void __iomem *)
			(macio->base + ((0x8000+i*0x100)>>2));
		/* Writing the bit names in the upper 16 bits with the data
		 * bits clear resets those control flags on DBDMA. */
		out_le32(&chan->control, (ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)<<16);
		while (in_le32(&chan->status) & ACTIVE)
			mb();
		out_le32(&chan->cmdptr_hi, save[i].cmdptr_hi);
		out_le32(&chan->cmdptr, save[i].cmdptr);
		out_le32(&chan->intr_sel, save[i].intr_sel);
		out_le32(&chan->br_sel, save[i].br_sel);
		out_le32(&chan->wait_sel, save[i].wait_sel);
	}
}
543
/* Put a Heathrow macio chip to sleep: save DBDMA + FCR state, then
 * shut down sound, IDE, ethernet and the modem/SCC.  @secondary
 * selects which save slots are used (gatwick second chip vs primary).
 * Registers 0x38/0x3c are the FCRs, 0x34 the media-bay control. */
static void heathrow_sleep(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		dbdma_save(macio, save_alt_dbdma);
		save_fcr[2] = MACIO_IN32(0x38);
		save_fcr[3] = MACIO_IN32(0x3c);
	} else {
		dbdma_save(macio, save_dbdma);
		save_fcr[0] = MACIO_IN32(0x38);
		save_fcr[1] = MACIO_IN32(0x3c);
		save_mbcr = MACIO_IN32(0x34);
		/* Make sure sound is shut down */
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_POWER_N);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		/* This seems to be necessary as well or the fan
		 * keeps coming up and battery drains fast */
		MACIO_BIC(HEATHROW_FCR, HRW_IOBUS_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_IDE0_RESET_N);
		/* Make sure eth is down even if module or sleep
		 * won't work properly */
		MACIO_BIC(HEATHROW_FCR, HRW_BMAC_IO_ENABLE | HRW_BMAC_RESET);
	}
	/* Make sure modem is shut down */
	MACIO_OUT8(HRW_GPIO_MODEM_RESET,
		MACIO_IN8(HRW_GPIO_MODEM_RESET) & ~1);
	MACIO_BIS(HEATHROW_FCR, HRW_SCC_TRANS_EN_N);
	MACIO_BIC(HEATHROW_FCR, OH_SCCA_IO|OH_SCCB_IO|HRW_SCC_ENABLE);

	/* Let things settle */
	(void)MACIO_IN32(HEATHROW_FCR);
}
575
/* Undo heathrow_sleep(): write the saved FCR (and, for the primary
 * chip, MBCR) values back with settle delays, then restore the DBDMA
 * channel configuration.  The primary path forces HRW_IOBUS_ENABLE on
 * since it was cleared on the way down. */
static void heathrow_wakeup(struct macio_chip *macio, int secondary)
{
	if (secondary) {
		MACIO_OUT32(0x38, save_fcr[2]);
		(void)MACIO_IN32(0x38);		/* flush before delay */
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[3]);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_alt_dbdma);
	} else {
		MACIO_OUT32(0x38, save_fcr[0] | HRW_IOBUS_ENABLE);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x3c, save_fcr[1]);
		(void)MACIO_IN32(0x38);
		mdelay(1);
		MACIO_OUT32(0x34, save_mbcr);
		(void)MACIO_IN32(0x38);
		mdelay(10);
		dbdma_restore(macio, save_dbdma);
	}
}
599
/* Feature-call entry for sleep (value==1) / wakeup (value==0) on
 * Heathrow machines.  A gatwick secondary chip is put down first and
 * brought up last.
 *
 * NOTE(review): both gatwick calls pass &macio_chips[0] with
 * secondary=1 rather than &macio_chips[1] — looks suspicious but
 * matches the historical upstream code; confirm before changing. */
static long heathrow_sleep_state(struct device_node *node, long param,
				 long value)
{
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;
	if (value == 1) {
		if (macio_chips[1].type == macio_gatwick)
			heathrow_sleep(&macio_chips[0], 1);
		heathrow_sleep(&macio_chips[0], 0);
	} else if (value == 0) {
		heathrow_wakeup(&macio_chips[0], 0);
		if (macio_chips[1].type == macio_gatwick)
			heathrow_wakeup(&macio_chips[0], 1);
	}
	return 0;
}
616
/* Enable or disable an SCC serial channel ("ch-a"/"ch-b") on a
 * Core99 (KeyLargo) macio.  The low 12 bits of @param select the
 * interface variant (I2S modem, IrDA, ...); PMAC_SCC_FLAG_XMON locks
 * the channel so it cannot be disabled later (used for the debugger).
 * The lock is dropped around the mdelay()s during the reset pulses.
 */
static long core99_scc_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	unsigned long chan_mask;
	u32 fcr;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Map the device-tree node name to the channel flag */
	if (!strcmp(node->name, "ch-a"))
		chan_mask = MACIO_FLAG_SCCA_ON;
	else if (!strcmp(node->name, "ch-b"))
		chan_mask = MACIO_FLAG_SCCB_ON;
	else
		return -ENODEV;

	if (value) {
		int need_reset_scc = 0;
		int need_reset_irda = 0;

		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		/* Check if scc cell need enabling */
		if (!(fcr & KL0_SCC_CELL_ENABLE)) {
			fcr |= KL0_SCC_CELL_ENABLE;
			need_reset_scc = 1;
		}
		if (chan_mask & MACIO_FLAG_SCCA_ON) {
			fcr |= KL0_SCCA_ENABLE;
			/* Don't enable line drivers for I2S modem */
			if ((param & 0xfff) == PMAC_SCC_I2S1)
				fcr &= ~KL0_SCC_A_INTF_ENABLE;
			else
				fcr |= KL0_SCC_A_INTF_ENABLE;
		}
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr |= KL0_SCCB_ENABLE;
			/* Perform irda specific inits */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_SCC_B_INTF_ENABLE;
				fcr |= KL0_IRDA_ENABLE;
				fcr |= KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE;
				fcr |= KL0_IRDA_SOURCE1_SEL;
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
				need_reset_irda = 1;
			} else
				fcr |= KL0_SCC_B_INTF_ENABLE;
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		macio->flags |= chan_mask;
		/* Pulse the SCC cell reset if we just powered the cell up */
		if (need_reset_scc)  {
			MACIO_BIS(KEYLARGO_FCR0, KL0_SCC_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_SCC_RESET);
		}
		/* Same for the IrDA cell */
		if (need_reset_irda)  {
			MACIO_BIS(KEYLARGO_FCR0, KL0_IRDA_RESET);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(15);
			LOCK(flags);
			MACIO_BIC(KEYLARGO_FCR0, KL0_IRDA_RESET);
		}
		UNLOCK(flags);
		if (param & PMAC_SCC_FLAG_XMON)
			macio->flags |= MACIO_FLAG_SCC_LOCKED;
	} else {
		/* Refuse to disable a channel the debugger owns */
		if (macio->flags & MACIO_FLAG_SCC_LOCKED)
			return -EPERM;
		LOCK(flags);
		fcr = MACIO_IN32(KEYLARGO_FCR0);
		if (chan_mask & MACIO_FLAG_SCCA_ON)
			fcr &= ~KL0_SCCA_ENABLE;
		if (chan_mask & MACIO_FLAG_SCCB_ON) {
			fcr &= ~KL0_SCCB_ENABLE;
			/* Perform irda specific clears */
			if ((param & 0xfff) == PMAC_SCC_IRDA) {
				fcr &= ~KL0_IRDA_ENABLE;
				fcr &= ~(KL0_IRDA_CLK32_ENABLE | KL0_IRDA_CLK19_ENABLE);
				fcr &= ~(KL0_IRDA_FAST_CONNECT|KL0_IRDA_DEFAULT1|KL0_IRDA_DEFAULT0);
				fcr &= ~(KL0_IRDA_SOURCE1_SEL|KL0_IRDA_SOURCE2_SEL|KL0_IRDA_HIGH_BAND);
			}
		}
		MACIO_OUT32(KEYLARGO_FCR0, fcr);
		/* Power the whole SCC cell down when both channels are off */
		if ((fcr & (KL0_SCCA_ENABLE | KL0_SCCB_ENABLE)) == 0) {
			fcr &= ~KL0_SCC_CELL_ENABLE;
			MACIO_OUT32(KEYLARGO_FCR0, fcr);
		}
		macio->flags &= ~(chan_mask);
		UNLOCK(flags);
		mdelay(10);
	}
	return 0;
}
716
/* Power the internal modem on a KeyLargo machine up or down via its
 * reset GPIO and the FCR2 ALT_DATA_OUT bit.  Power-up ends with a
 * reset pulse: data high, low, high again, 250ms apart, with the lock
 * dropped around each delay.  node==NULL means the internal USB modem
 * hack (only valid on a keylargo primary chip). */
static long
core99_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_keylargo)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Drive the reset GPIO as an output, data bit initially low */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_BIC(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
		UNLOCK(flags);
	}
	if (value) {
		/* Reset pulse: high / low / high with 250ms settle times */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
768
/* Pangea/Intrepid variant of core99_modem_enable(): same reset-GPIO
 * pulse sequence, but modem power is controlled through the
 * KL_GPIO_MODEM_POWER GPIO (data bit active-low: cleared = on)
 * instead of FCR2's ALT_DATA_OUT bit. */
static long
pangea_modem_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	u8 gpio;
	unsigned long flags;

	/* Hack for internal USB modem */
	if (node == NULL) {
		if (macio_chips[0].type != macio_pangea &&
		    macio_chips[0].type != macio_intrepid)
			return -ENODEV;
		node = macio_chips[0].of_node;
	}
	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;
	/* Drive the reset GPIO as an output, data bit initially low */
	gpio = MACIO_IN8(KL_GPIO_MODEM_RESET);
	gpio |= KEYLARGO_GPIO_OUTPUT_ENABLE;
	gpio &= ~KEYLARGO_GPIO_OUTOUT_DATA;

	if (!value) {
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		UNLOCK(flags);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		mdelay(250);
	}
	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE);
		UNLOCK(flags);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		mdelay(250);
	} else {
		MACIO_OUT8(KL_GPIO_MODEM_POWER,
			KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
		UNLOCK(flags);
	}
	if (value) {
		/* Reset pulse: high / low / high with 250ms settle times */
		LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250); LOCK(flags);
		MACIO_OUT8(KL_GPIO_MODEM_RESET, gpio | KEYLARGO_GPIO_OUTOUT_DATA);
		(void)MACIO_IN8(KL_GPIO_MODEM_RESET);
		UNLOCK(flags); mdelay(250);
	}
	return 0;
}
823
/* Gate the UniNorth ATA/100 clock on or off.  Only available on
 * UniNorth rev >= 0x24.  On enable, the IDE PCI function is looked up
 * from the OF node and re-enabled/bus-mastered; failure to find it is
 * silently treated as success (best effort). */
static long
core99_ata100_enable(struct device_node *node, long value)
{
	unsigned long flags;
	struct pci_dev *pdev = NULL;
	u8 pbus, pid;

	if (uninorth_rev < 0x24)
		return -ENODEV;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_ATA100);
	(void)UN_IN(UNI_N_CLOCK_CNTL);	/* flush posted write */
	UNLOCK(flags);
	udelay(20);

	if (value) {
		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
			pdev = pci_find_slot(pbus, pid);
		if (pdev == NULL)
			return 0;
		pci_enable_device(pdev);
		pci_set_master(pdev);
	}
	return 0;
}
853
854static long
855core99_ide_enable(struct device_node *node, long param, long value)
856{
857 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
858 * based ata-100
859 */
860 switch(param) {
861 case 0:
862 return simple_feature_tweak(node, macio_unknown,
863 KEYLARGO_FCR1, KL1_EIDE0_ENABLE, value);
864 case 1:
865 return simple_feature_tweak(node, macio_unknown,
866 KEYLARGO_FCR1, KL1_EIDE1_ENABLE, value);
867 case 2:
868 return simple_feature_tweak(node, macio_unknown,
869 KEYLARGO_FCR1, KL1_UIDE_ENABLE, value);
870 case 3:
871 return core99_ata100_enable(node, value);
872 default:
873 return -ENODEV;
874 }
875}
876
877static long
878core99_ide_reset(struct device_node *node, long param, long value)
879{
880 switch(param) {
881 case 0:
882 return simple_feature_tweak(node, macio_unknown,
883 KEYLARGO_FCR1, KL1_EIDE0_RESET_N, !value);
884 case 1:
885 return simple_feature_tweak(node, macio_unknown,
886 KEYLARGO_FCR1, KL1_EIDE1_RESET_N, !value);
887 case 2:
888 return simple_feature_tweak(node, macio_unknown,
889 KEYLARGO_FCR1, KL1_UIDE_RESET_N, !value);
890 default:
891 return -ENODEV;
892 }
893}
894
/* Gate the UniNorth GMAC (ethernet) clock on or off. */
static long
core99_gmac_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	LOCK(flags);
	if (value)
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	else
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_GMAC);
	(void)UN_IN(UNI_N_CLOCK_CNTL);	/* flush posted write */
	UNLOCK(flags);
	udelay(20);

	return 0;
}
911
/* Pulse the ethernet PHY reset GPIO: drive it low (output enable,
 * data clear) for 10ms, then release to data-high for 10ms. */
static long
core99_gmac_phy_reset(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);	/* flush posted write */
	UNLOCK(flags);
	mdelay(10);
	LOCK(flags);
	MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
		KEYLARGO_GPIO_OUTOUT_DATA);
	UNLOCK(flags);
	mdelay(10);

	return 0;
}
936
/* Power the sound chip on or off via its GPIO.  Only done on models
 * known to handle it (Pismo, Titanium); elsewhere this is a no-op
 * returning success. */
static long
core99_sound_chip_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Do a better probe code, screamer G4 desktops &
	 * iMacs can do that too, add a recalibrate in
	 * the driver as well
	 */
	if (pmac_mb.model_id == PMAC_TYPE_PISMO ||
	    pmac_mb.model_id == PMAC_TYPE_TITANIUM) {
		LOCK(flags);
		if (value)
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				KEYLARGO_GPIO_OUTPUT_ENABLE |
				KEYLARGO_GPIO_OUTOUT_DATA);
		else
			MACIO_OUT8(KL_GPIO_SOUND_POWER,
				KEYLARGO_GPIO_OUTPUT_ENABLE);
		(void)MACIO_IN8(KL_GPIO_SOUND_POWER);	/* flush posted write */
		UNLOCK(flags);
	}
	return 0;
}
966
/* Power the AirPort card slot up or down.  The enable path replays
 * Open Firmware's enable-cardslot/init-wireless GPIO and FCR2
 * sequence (magic offsets and values taken from OF, see comment
 * below); the disable path clears the card-select bit and zeroes the
 * five AirPort GPIOs.  State is tracked in macio->flags so repeated
 * calls with the same value are no-ops. */
static long
core99_airport_enable(struct device_node *node, long param, long value)
{
	struct macio_chip* macio;
	unsigned long flags;
	int state;

	macio = macio_find(node, 0);
	if (!macio)
		return -ENODEV;

	/* Hint: we allow passing of macio itself for the sake of the
	 * sleep code
	 */
	if (node != macio->of_node &&
	    (!node->parent || node->parent != macio->of_node))
		return -ENODEV;
	state = (macio->flags & MACIO_FLAG_AIRPORT_ON) != 0;
	if (value == state)
		return 0;
	if (value) {
		/* This code is a reproduction of OF enable-cardslot
		 * and init-wireless methods, slightly hacked until
		 * I got it working.
		 */
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 5);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);
		mdelay(10);
		LOCK(flags);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xf, 4);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xf);
		UNLOCK(flags);

		mdelay(10);

		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xb, 0);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xb);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xa, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xa);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xd, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xd);
		udelay(10);
		MACIO_OUT8(KEYLARGO_GPIO_0+0xe, 0x28);
		(void)MACIO_IN8(KEYLARGO_GPIO_0+0xe);
		UNLOCK(flags);
		udelay(10);
		MACIO_OUT32(0x1c000, 0);
		mdelay(1);
		MACIO_OUT8(0x1a3e0, 0x41);
		(void)MACIO_IN8(0x1a3e0);
		udelay(10);
		LOCK(flags);
		MACIO_BIS(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		UNLOCK(flags);
		mdelay(100);

		macio->flags |= MACIO_FLAG_AIRPORT_ON;
	} else {
		LOCK(flags);
		MACIO_BIC(KEYLARGO_FCR2, KL2_CARDSEL_16);
		(void)MACIO_IN32(KEYLARGO_FCR2);
		MACIO_OUT8(KL_GPIO_AIRPORT_0, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_1, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_2, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_3, 0);
		MACIO_OUT8(KL_GPIO_AIRPORT_4, 0);
		(void)MACIO_IN8(KL_GPIO_AIRPORT_4);
		UNLOCK(flags);

		macio->flags &= ~MACIO_FLAG_AIRPORT_ON;
	}
	return 0;
}
1052
1053#ifdef CONFIG_SMP
1054static long
1055core99_reset_cpu(struct device_node *node, long param, long value)
1056{
1057 unsigned int reset_io = 0;
1058 unsigned long flags;
1059 struct macio_chip *macio;
1060 struct device_node *np;
1061 const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0,
1062 KL_GPIO_RESET_CPU1,
1063 KL_GPIO_RESET_CPU2,
1064 KL_GPIO_RESET_CPU3 };
1065
1066 macio = &macio_chips[0];
1067 if (macio->type != macio_keylargo)
1068 return -ENODEV;
1069
1070 np = find_path_device("/cpus");
1071 if (np == NULL)
1072 return -ENODEV;
1073 for (np = np->child; np != NULL; np = np->sibling) {
1074 u32 *num = (u32 *)get_property(np, "reg", NULL);
1075 u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
1076 if (num == NULL || rst == NULL)
1077 continue;
1078 if (param == *num) {
1079 reset_io = *rst;
1080 break;
1081 }
1082 }
1083 if (np == NULL || reset_io == 0)
1084 reset_io = dflt_reset_lines[param];
1085
1086 LOCK(flags);
1087 MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
1088 (void)MACIO_IN8(reset_io);
1089 udelay(1);
1090 MACIO_OUT8(reset_io, 0);
1091 (void)MACIO_IN8(reset_io);
1092 UNLOCK(flags);
1093
1094 return 0;
1095}
1096#endif /* CONFIG_SMP */
1097
/* Power one of the three KeyLargo/Pangea/Intrepid USB cells up or
 * down.  The cell is selected by the node's "AAPL,clock-id" property
 * (usb0u048 -> 0, usb1u148 -> 2, usb2u248 -> 4; USB2 lives in FCR1
 * while 0/1 live in FCR0).  Wakeup-enable bits live in FCR4 (FCR3 for
 * USB2).  The spinlock is dropped around the mdelay()s; on Intrepid,
 * enabling additionally polls the UniNorth clock-stop status until
 * the cell's clocks are reported running (with a 1s timeout). */
static long
core99_usb_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio;
	unsigned long flags;
	char *prop;
	int number;
	u32 reg;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/* Identify which USB cell this node is */
	prop = (char *)get_property(node, "AAPL,clock-id", NULL);
	if (!prop)
		return -ENODEV;
	if (strncmp(prop, "usb0u048", 8) == 0)
		number = 0;
	else if (strncmp(prop, "usb1u148", 8) == 0)
		number = 2;
	else if (strncmp(prop, "usb2u248", 8) == 0)
		number = 4;
	else
		return -ENODEV;

	/* Sorry for the brute-force locking, but this is only used during
	 * sleep and the timing seem to be critical
	 */
	LOCK(flags);
	if (value) {
		/* Turn ON: release pad suspend, settle, enable the cell */
		if (number == 0) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
			UNLOCK(flags);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
		} else if (number == 2) {
			MACIO_BIC(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
		} else if (number == 4) {
			MACIO_BIC(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			UNLOCK(flags);
			(void)MACIO_IN32(KEYLARGO_FCR1);
			mdelay(1);
			LOCK(flags);
			MACIO_BIS(KEYLARGO_FCR1, KL1_USB2_CELL_ENABLE);
		}
		/* Clear the wakeup-enable bits for both ports of the cell */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
				KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number));
			reg &=	~(KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
				KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1));
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(10);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
				KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0));
			reg &=	~(KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
				KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1));
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(10);
		}
		if (macio->type == macio_intrepid) {
			/* wait for clock stopped bits to clear */
			u32 test0 = 0, test1 = 0;
			u32 status0, status1;
			int timeout = 1000;

			UNLOCK(flags);
			switch (number) {
			case 0:
				test0 = UNI_N_CLOCK_STOPPED_USB0;
				test1 = UNI_N_CLOCK_STOPPED_USB0PCI;
				break;
			case 2:
				test0 = UNI_N_CLOCK_STOPPED_USB1;
				test1 = UNI_N_CLOCK_STOPPED_USB1PCI;
				break;
			case 4:
				test0 = UNI_N_CLOCK_STOPPED_USB2;
				test1 = UNI_N_CLOCK_STOPPED_USB2PCI;
				break;
			}
			do {
				if (--timeout <= 0) {
					printk(KERN_ERR "core99_usb_enable: "
					       "Timeout waiting for clocks\n");
					break;
				}
				mdelay(1);
				status0 = UN_IN(UNI_N_CLOCK_STOP_STATUS0);
				status1 = UN_IN(UNI_N_CLOCK_STOP_STATUS1);
			} while ((status0 & test0) | (status1 & test1));
			LOCK(flags);
		}
	} else {
		/* Turn OFF: set wakeup bits, disable the cell (except on
		 * Intrepid), then put the pads into suspend */
		if (number < 4) {
			reg = MACIO_IN32(KEYLARGO_FCR4);
			reg |=	KL4_PORT_WAKEUP_ENABLE(number) | KL4_PORT_RESUME_WAKE_EN(number) |
				KL4_PORT_CONNECT_WAKE_EN(number) | KL4_PORT_DISCONNECT_WAKE_EN(number);
			reg |=	KL4_PORT_WAKEUP_ENABLE(number+1) | KL4_PORT_RESUME_WAKE_EN(number+1) |
				KL4_PORT_CONNECT_WAKE_EN(number+1) | KL4_PORT_DISCONNECT_WAKE_EN(number+1);
			MACIO_OUT32(KEYLARGO_FCR4, reg);
			(void)MACIO_IN32(KEYLARGO_FCR4);
			udelay(1);
		} else {
			reg = MACIO_IN32(KEYLARGO_FCR3);
			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(0) | KL3_IT_PORT_RESUME_WAKE_EN(0) |
				KL3_IT_PORT_CONNECT_WAKE_EN(0) | KL3_IT_PORT_DISCONNECT_WAKE_EN(0);
			reg |=	KL3_IT_PORT_WAKEUP_ENABLE(1) | KL3_IT_PORT_RESUME_WAKE_EN(1) |
				KL3_IT_PORT_CONNECT_WAKE_EN(1) | KL3_IT_PORT_DISCONNECT_WAKE_EN(1);
			MACIO_OUT32(KEYLARGO_FCR3, reg);
			(void)MACIO_IN32(KEYLARGO_FCR3);
			udelay(1);
		}
		if (number == 0) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB0_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB0_PAD_SUSPEND0 | KL0_USB0_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 2) {
			if (macio->type != macio_intrepid)
				MACIO_BIC(KEYLARGO_FCR0, KL0_USB1_CELL_ENABLE);
			(void)MACIO_IN32(KEYLARGO_FCR0);
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR0, (KL0_USB1_PAD_SUSPEND0 | KL0_USB1_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR0);
		} else if (number == 4) {
			udelay(1);
			MACIO_BIS(KEYLARGO_FCR1, (KL1_USB2_PAD_SUSPEND0 | KL1_USB2_PAD_SUSPEND1));
			(void)MACIO_IN32(KEYLARGO_FCR1);
		}
		udelay(1);
	}
	UNLOCK(flags);

	return 0;
}
1250
/* Gate the UniNorth FireWire clock on or off.  Only valid when the
 * platform setup flagged FireWire as supported on this macio. */
static long
core99_firewire_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		UN_BIS(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);	/* flush posted write */
	} else {
		UN_BIC(UNI_N_CLOCK_CNTL, UNI_N_CLOCK_CNTL_FW);
		(void)UN_IN(UNI_N_CLOCK_CNTL);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1277
/* Switch FireWire cable power via its GPIO (value 0 = power on,
 * value 4 = power off — the GPIO appears active-low).  @node may be
 * NULL; availability is gated on the PMAC_MB_HAS_FW_POWER board
 * flag instead. */
static long
core99_firewire_cable_power(struct device_node *node, long param, long value)
{
	unsigned long flags;
	struct macio_chip *macio;

	/* Trick: we allow NULL node */
	if ((pmac_mb.board_flags & PMAC_MB_HAS_FW_POWER) == 0)
		return -ENODEV;
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;
	if (!(macio->flags & MACIO_FLAG_FW_SUPPORTED))
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 0);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER);	/* flush posted write */
		udelay(10);
	} else {
		MACIO_OUT8(KL_GPIO_FW_CABLE_POWER , 4);
		MACIO_IN8(KL_GPIO_FW_CABLE_POWER); udelay(10);
	}
	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1308
/* Enable/disable the UniNorth AACK delay (Intrepid, UniNorth rev
 * >= 0xd2 only).
 *
 * NOTE(review): the decision is made on @param, not @value, unlike
 * the other feature calls here — this matches the historical code,
 * but confirm against callers before relying on it. */
static long
intrepid_aack_delay_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	if (uninorth_rev < 0xd2)
		return -ENODEV;

	LOCK(flags);
	if (param)
		UN_BIS(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	else
		UN_BIC(UNI_N_AACK_DELAY, UNI_N_AACK_DELAY_ENABLE);
	UNLOCK(flags);

	return 0;
}
1326
1327
1328#endif /* CONFIG_POWER4 */
1329
/* Read one macio GPIO register; @param is the register offset.
 * (`macio` is referenced implicitly by the MACIO_IN8 macro.) */
static long
core99_read_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	return MACIO_IN8(param);
}
1337
1338
/* Write the low byte of @value to the macio GPIO register at offset
 * @param.  (`macio` is referenced implicitly by the MACIO_OUT8 macro.) */
static long
core99_write_gpio(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];

	MACIO_OUT8(param, (u8)(value & 0xff));
	return 0;
}
1347
1348#ifdef CONFIG_POWER4
/* Gate the K2 GMAC clock on a G5.  The node is added to / removed
 * from k2_skiplist on the appropriate side of the clock change (mb()
 * ordering) — presumably so config-space accesses skip the device
 * while its clock is off. */
static long g5_gmac_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
		mb();
		k2_skiplist[0] = NULL;
	} else {
		k2_skiplist[0] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1373
/* Gate the K2 FireWire clock on a G5; mirrors g5_gmac_enable() but
 * uses skiplist slot 1 and the FireWire clock bit. */
static long g5_fw_enable(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (node == NULL)
		return -ENODEV;

	LOCK(flags);
	if (value) {
		MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
		mb();
		k2_skiplist[1] = NULL;
	} else {
		k2_skiplist[1] = node;
		mb();
		MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
	}

	UNLOCK(flags);
	mdelay(1);

	return 0;
}
1398
/* Bring the U3 MPIC out of reset and enable its outputs.  Only acts
 * on MPIC nodes that sit directly under the "u3" bridge; other nodes
 * are silently accepted as a no-op. */
static long g5_mpic_enable(struct device_node *node, long param, long value)
{
	unsigned long flags;

	if (node->parent == NULL || strcmp(node->parent->name, "u3"))
		return 0;

	LOCK(flags);
	UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
	UNLOCK(flags);

	return 0;
}
1412
/* Pulse the ethernet PHY reset line on a G5: drive GPIO 29 high for
 * 10ms then release it.  Only done for the BCM5221 PHY (iMac G5);
 * combo PHYs must not be reset. */
static long g5_eth_phy_reset(struct device_node *node, long param, long value)
{
	struct macio_chip *macio = &macio_chips[0];
	struct device_node *phy;
	int need_reset;

	/*
	 * We must not reset the combo PHYs, only the BCM5221 found in
	 * the iMac G5.
	 */
	phy = of_get_next_child(node, NULL);
	if (!phy)
		return -ENODEV;
	need_reset = device_is_compatible(phy, "B5221");
	of_node_put(phy);
	if (!need_reset)
		return 0;

	/* PHY reset is GPIO 29, not in device-tree unfortunately */
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
		   KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
	/* Thankfully, this is now always called at a time when we can
	 * schedule by sungem.
	 */
	msleep(10);
	MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);

	return 0;
}
1442
/* Enable the I2S0 audio cell on a G5: clocks first, then the cell,
 * then release its reset, with settle delays between steps.  Disable
 * (value==0) is intentionally not implemented yet. */
static long g5_i2s_enable(struct device_node *node, long param, long value)
{
	/* Very crude implementation for now */
	struct macio_chip *macio = &macio_chips[0];
	unsigned long flags;

	if (value == 0)
		return 0; /* don't disable yet */

	LOCK(flags);
	MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
		  KL3_I2S0_CLK18_ENABLE);
	udelay(10);
	MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
		  K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
	udelay(10);
	MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
	UNLOCK(flags);
	udelay(10);

	return 0;
}
1465
1466
1467#ifdef CONFIG_SMP
/* Pulse the soft-reset GPIO of CPU number @param on a KeyLargo2 (G5)
 * machine.  Unlike core99_reset_cpu() there is no default GPIO table:
 * if no CPU node advertises a "soft-reset" property for this CPU we
 * fail with -ENODEV. */
static long g5_reset_cpu(struct device_node *node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip *macio;
	struct device_node *np;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2)
		return -ENODEV;

	np = find_path_device("/cpus");
	if (np == NULL)
		return -ENODEV;
	/* Look up the "soft-reset" GPIO of the matching CPU node */
	for (np = np->child; np != NULL; np = np->sibling) {
		u32 *num = (u32 *)get_property(np, "reg", NULL);
		u32 *rst = (u32 *)get_property(np, "soft-reset", NULL);
		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	if (np == NULL || reset_io == 0)
		return -ENODEV;

	/* Drive the line active for 1us, then release it */
	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);	/* flush posted write */
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
1505#endif /* CONFIG_SMP */
1506
/*
 * This can be called from pmac_smp so isn't static
 *
 * This takes the second CPU off the bus on dual CPU machines
 * running UP
 */
void g5_phy_disable_cpu1(void)
{
	/* Zero the U3 processor-interface PHY config for CPU1 */
	UN_OUT(U3_API_PHY_CONFIG_1, 0);
}
1517#endif /* CONFIG_POWER4 */
1518
1519#ifndef CONFIG_POWER4
1520
/* Power down as much of a KeyLargo chip as possible, used for sleep
 * (sleep_mode != 0 shuts down additional clocks/PLLs: USB reference,
 * PLL kw12, timer and VIA clocks).  Touches FCR0/1/2/3 and the
 * media-bay control register. */
static void
keylargo_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	if (sleep_mode) {
		mdelay(1);
		MACIO_BIS(KEYLARGO_FCR0, KL0_USB_REF_SUSPEND);
		(void)MACIO_IN32(KEYLARGO_FCR0);
		mdelay(1);
	}

	/* SCC and IrDA off */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
				KL0_SCC_CELL_ENABLE |
				KL0_IRDA_ENABLE | KL0_IRDA_CLK32_ENABLE |
				KL0_IRDA_CLK19_ENABLE);

	/* Media bay left configured as IDE */
	MACIO_BIC(KEYLARGO_MBCR, KL_MBCR_MB0_DEV_MASK);
	MACIO_BIS(KEYLARGO_MBCR, KL_MBCR_MB0_IDE_ENABLE);

	/* Audio, I2S and all IDE cells off */
	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_EIDE0_ENABLE | KL1_EIDE0_RESET_N |
		KL1_EIDE1_ENABLE | KL1_EIDE1_RESET_N |
		KL1_UIDE_ENABLE);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);
	MACIO_BIC(KEYLARGO_FCR2, KL2_IOBUS_ENABLE);

	/* Shut down PLLs and misc clocks; the PLL2X/total shutdown bits
	 * only exist from chip rev 2 on */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	if (macio->rev >= 2) {
		temp |= KL3_SHUTDOWN_PLL2X;
		if (sleep_mode)
			temp |= KL3_SHUTDOWN_PLL_TOTAL;
	}

	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	if (sleep_mode)
		temp |= KL3_SHUTDOWN_PLLKW12;
	temp &= ~(KL3_CLK66_ENABLE | KL3_CLK49_ENABLE | KL3_CLK45_ENABLE
		| KL3_CLK31_ENABLE | KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_VIA_CLK16_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1574
static void
pangea_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	/* Pangea variant of the mac-io shutdown: disable SCC and USB
	 * cells ...
	 */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE |
		  KL0_USB0_CELL_ENABLE | KL0_USB1_CELL_ENABLE);

	/* ... audio, I2S and IDE cells ... */
	MACIO_BIC(KEYLARGO_FCR1,
		KL1_AUDIO_SEL_22MCLK | KL1_AUDIO_CLK_ENABLE_BIT |
		KL1_AUDIO_CLK_OUT_ENABLE | KL1_AUDIO_CELL_ENABLE |
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
		KL1_UIDE_ENABLE);
	/* On mobile machines, also hold the IDE cell in reset */
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	MACIO_BIS(KEYLARGO_FCR2, KL2_ALT_DATA_OUT);

	/* ... and shut down PLLs/clocks, dropping the VIA and timer
	 * clocks only for a full sleep.
	 */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp |= KL3_SHUTDOWN_PLLKW6 | KL3_SHUTDOWN_PLLKW4 |
		KL3_SHUTDOWN_PLLKW35;
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE | KL3_CLK31_ENABLE
		| KL3_I2S0_CLK18_ENABLE | KL3_I2S1_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_VIA_CLK16_ENABLE | KL3_TIMER_CLK18_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
}
1608
static void
intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
{
	u32 temp;

	/* Intrepid variant of the mac-io shutdown: disable the SCC
	 * cell, the I2S cells, and their clocks.
	 */
	MACIO_BIC(KEYLARGO_FCR0,KL0_SCCA_ENABLE | KL0_SCCB_ENABLE |
		  KL0_SCC_CELL_ENABLE);

	MACIO_BIC(KEYLARGO_FCR1,
		  /*KL1_USB2_CELL_ENABLE |*/
		KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
		KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
		KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
	/* On mobile machines, also hold the IDE cell in reset */
	if (pmac_mb.board_flags & PMAC_MB_MOBILE)
		MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);

	/* Stop clocks in FCR3; timer/VIA clocks only for a full sleep */
	temp = MACIO_IN32(KEYLARGO_FCR3);
	temp &= ~(KL3_CLK49_ENABLE | KL3_CLK45_ENABLE |
		  KL3_I2S1_CLK18_ENABLE | KL3_I2S0_CLK18_ENABLE);
	if (sleep_mode)
		temp &= ~(KL3_TIMER_CLK18_ENABLE | KL3_IT_VIA_CLK32_ENABLE);
	MACIO_OUT32(KEYLARGO_FCR3, temp);

	/* Flush posted writes & wait a bit */
	(void)MACIO_IN32(KEYLARGO_FCR0);
	mdelay(10);
}
1636
1637
void pmac_tweak_clock_spreading(int enable)
{
	struct macio_chip *macio = &macio_chips[0];

	/* Hack for doing clock spreading on some machines PowerBooks and
	 * iBooks. This implements the "platform-do-clockspreading" OF
	 * property as decoded manually on various models. For safety, we also
	 * check the product ID in the device-tree in cases we'll whack the i2c
	 * chip to make reasonably sure we won't set wrong values in there
	 *
	 * Of course, ultimately, we have to implement a real parser for
	 * the platform-do-* stuff...
	 */

	/* Intrepid-based machines: clock spreading is controlled directly
	 * through a Uni-North register.
	 */
	if (macio->type == macio_intrepid) {
		if (enable)
			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
		else
			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
		mdelay(40);
	}

	/* NOTE: this "while" is only used as a breakable block -- every
	 * path through the body ends in a break, so it executes at most
	 * once on the listed PowerBook/iBook models.
	 */
	while (machine_is_compatible("PowerBook5,2") ||
	       machine_is_compatible("PowerBook5,3") ||
	       machine_is_compatible("PowerBook6,2") ||
	       machine_is_compatible("PowerBook6,3")) {
		struct device_node *ui2c = of_find_node_by_type(NULL, "i2c");
		struct device_node *dt = of_find_node_by_name(NULL, "device-tree");
		u8 buffer[9];
		u32 *productID;
		int i, rc, changed = 0;

		if (dt == NULL)
			break;
		productID = (u32 *)get_property(dt, "pid#", NULL);
		if (productID == NULL)
			break;
		/* Look for the i2c bus whose parent is the "uni-n" node */
		while(ui2c) {
			struct device_node *p = of_get_parent(ui2c);
			if (p && !strcmp(p->name, "uni-n"))
				break;
			ui2c = of_find_node_by_type(ui2c, "i2c");
		}
		if (ui2c == NULL)
			break;
		DBG("Trying to bump clock speed for PID: %08x...\n", *productID);
		rc = pmac_low_i2c_open(ui2c, 1);
		if (rc != 0)
			break;
		/* Read the current 9-byte clock-chip register block */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		DBG("\n");

		/* Patch the register values per product ID; the magic
		 * numbers were decoded manually from Apple's
		 * platform-do-clockspreading scripts.
		 */
		switch(*productID) {
		case 0x1182:	/* AlBook 12" rev 2 */
		case 0x1183:	/* iBook G4 12" */
			buffer[0] = (buffer[0] & 0x8f) | 0x70;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		case 0x3142:	/* AlBook 15" (ATI M10) */
		case 0x3143:	/* AlBook 17" (ATI M10) */
			buffer[0] = (buffer[0] & 0xaf) | 0x50;
			buffer[2] = (buffer[2] & 0x7f) | 0x00;
			buffer[5] = (buffer[5] & 0x80) | 0x31;
			buffer[6] = (buffer[6] & 0x40) | 0xb0;
			buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0);
			buffer[8] = (buffer[8] & 0x00) | 0x30;
			changed = 1;
			break;
		default:
			DBG("i2c-hwclock: Machine model not handled\n");
			break;
		}
		if (!changed) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		/* Write the patched block back, then read it again for
		 * debug output.
		 */
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
		DBG("write result: %d,", rc);
		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined);
		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9);
		DBG("read result: %d,", rc);
		if (rc != 0) {
			pmac_low_i2c_close(ui2c);
			break;
		}
		for (i=0; i<9; i++)
			DBG(" %02x", buffer[i]);
		pmac_low_i2c_close(ui2c);
		break;
	}
}
1743
1744
static int
core99_sleep(void)
{
	struct macio_chip *macio;
	int i;

	/* Put a Core99-class machine (KeyLargo, Pangea or Intrepid
	 * mac-io) to sleep: power down peripherals, save the mac-io
	 * state restored later by core99_wake_up(), shut down the
	 * chip, and finally put the host bridge to sleep.
	 */
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/* We power off the wireless slot in case it was not done
	 * by the driver. We don't power it on automatically however
	 */
	if (macio->flags & MACIO_FLAG_AIRPORT_ON)
		core99_airport_enable(macio->of_node, 0, 0);

	/* We power off the FW cable. Should be done by the driver... */
	if (macio->flags & MACIO_FLAG_FW_SUPPORTED) {
		core99_firewire_enable(NULL, 0, 0);
		core99_firewire_cable_power(NULL, 0, 0);
	}

	/* We make sure int. modem is off (in case driver lost it) */
	if (macio->type == macio_keylargo)
		core99_modem_enable(macio->of_node, 0, 0);
	else
		pangea_modem_enable(macio->of_node, 0, 0);

	/* We make sure the sound is off as well */
	core99_sound_chip_enable(macio->of_node, 0, 0);

	/*
	 * Save various bits of KeyLargo
	 */

	/* Save the state of the various GPIOs */
	save_gpio_levels[0] = MACIO_IN32(KEYLARGO_GPIO_LEVELS0);
	save_gpio_levels[1] = MACIO_IN32(KEYLARGO_GPIO_LEVELS1);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		save_gpio_extint[i] = MACIO_IN8(KEYLARGO_GPIO_EXTINT_0+i);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		save_gpio_normal[i] = MACIO_IN8(KEYLARGO_GPIO_0+i);

	/* Save the FCRs (MBCR exists on KeyLargo only, FCR5 only on
	 * Pangea/Intrepid)
	 */
	if (macio->type == macio_keylargo)
		save_mbcr = MACIO_IN32(KEYLARGO_MBCR);
	save_fcr[0] = MACIO_IN32(KEYLARGO_FCR0);
	save_fcr[1] = MACIO_IN32(KEYLARGO_FCR1);
	save_fcr[2] = MACIO_IN32(KEYLARGO_FCR2);
	save_fcr[3] = MACIO_IN32(KEYLARGO_FCR3);
	save_fcr[4] = MACIO_IN32(KEYLARGO_FCR4);
	if (macio->type == macio_pangea || macio->type == macio_intrepid)
		save_fcr[5] = MACIO_IN32(KEYLARGO_FCR5);

	/* Save state & config of DBDMA channels */
	dbdma_save(macio, save_dbdma);

	/*
	 * Turn off as much as we can
	 */
	if (macio->type == macio_pangea)
		pangea_shutdown(macio, 1);
	else if (macio->type == macio_intrepid)
		intrepid_shutdown(macio, 1);
	else if (macio->type == macio_keylargo)
		keylargo_shutdown(macio, 1);

	/*
	 * Put the host bridge to sleep
	 */

	save_unin_clock_ctl = UN_IN(UNI_N_CLOCK_CNTL);
	/* Note: do not switch GMAC off, driver does it when necessary, WOL must keep it
	 * enabled !
	 */
	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl &
	       ~(/*UNI_N_CLOCK_CNTL_GMAC|*/UNI_N_CLOCK_CNTL_FW/*|UNI_N_CLOCK_CNTL_PCI*/));
	udelay(100);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_SLEEP);
	mdelay(10);

	/*
	 * FIXME: A bit of black magic with OpenPIC (don't ask me why)
	 */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIS(0x506e0, 0x00400000);
		MACIO_BIS(0x506e0, 0x80000000);
	}
	return 0;
}
1837
static int
core99_wake_up(void)
{
	struct macio_chip *macio;
	int i;

	/* Wake a Core99-class machine back up: the exact mirror of
	 * core99_sleep(), restoring the host bridge first, then the
	 * mac-io registers (FCRs, DBDMA, GPIOs) saved before sleep.
	 * Each FCR write is followed by a read to flush the posted
	 * write before the delay.
	 */
	macio = &macio_chips[0];
	if (macio->type != macio_keylargo && macio->type != macio_pangea &&
	    macio->type != macio_intrepid)
		return -ENODEV;

	/*
	 * Wakeup the host bridge
	 */
	UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
	udelay(10);
	UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
	udelay(10);

	/*
	 * Restore KeyLargo
	 */

	if (macio->type == macio_keylargo) {
		MACIO_OUT32(KEYLARGO_MBCR, save_mbcr);
		(void)MACIO_IN32(KEYLARGO_MBCR); udelay(10);
	}
	MACIO_OUT32(KEYLARGO_FCR0, save_fcr[0]);
	(void)MACIO_IN32(KEYLARGO_FCR0); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR1, save_fcr[1]);
	(void)MACIO_IN32(KEYLARGO_FCR1); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR2, save_fcr[2]);
	(void)MACIO_IN32(KEYLARGO_FCR2); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR3, save_fcr[3]);
	(void)MACIO_IN32(KEYLARGO_FCR3); udelay(10);
	MACIO_OUT32(KEYLARGO_FCR4, save_fcr[4]);
	(void)MACIO_IN32(KEYLARGO_FCR4); udelay(10);
	if (macio->type == macio_pangea || macio->type == macio_intrepid) {
		MACIO_OUT32(KEYLARGO_FCR5, save_fcr[5]);
		(void)MACIO_IN32(KEYLARGO_FCR5); udelay(10);
	}

	dbdma_restore(macio, save_dbdma);

	/* Restore GPIO levels saved in core99_sleep() */
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS0, save_gpio_levels[0]);
	MACIO_OUT32(KEYLARGO_GPIO_LEVELS1, save_gpio_levels[1]);
	for (i=0; i<KEYLARGO_GPIO_EXTINT_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_EXTINT_0+i, save_gpio_extint[i]);
	for (i=0; i<KEYLARGO_GPIO_CNT; i++)
		MACIO_OUT8(KEYLARGO_GPIO_0+i, save_gpio_normal[i]);

	/* FIXME more black magic with OpenPIC ... */
	if (pmac_mb.model_id == PMAC_TYPE_SAWTOOTH) {
		MACIO_BIC(0x506e0, 0x00400000);
		MACIO_BIC(0x506e0, 0x80000000);
	}

	UN_OUT(UNI_N_CLOCK_CNTL, save_unin_clock_ctl);
	udelay(100);

	return 0;
}
1900
static long
core99_sleep_state(struct device_node *node, long param, long value)
{
	/* PMAC_FTR_SLEEP_STATE handler for Core99 machines.
	 * value == 1 enters sleep, value == 0 wakes up.
	 *
	 * Param == 1 means to enter the "fake sleep" mode that is
	 * used for CPU speed switch
	 */
	if (param == 1) {
		if (value == 1) {
			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_SLEEPING);
			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_IDLE2);
		} else {
			UN_OUT(UNI_N_POWER_MGT, UNI_N_POWER_MGT_NORMAL);
			udelay(10);
			UN_OUT(UNI_N_HWINIT_STATE, UNI_N_HWINIT_STATE_RUNNING);
			udelay(10);
		}
		return 0;
	}
	/* Real sleep requires the board to have been flagged capable */
	if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
		return -EPERM;

	if (value == 1)
		return core99_sleep();
	else if (value == 0)
		return core99_wake_up();
	return 0;
}
1928
1929#endif /* CONFIG_POWER4 */
1930
1931static long
1932generic_dev_can_wake(struct device_node *node, long param, long value)
1933{
1934 /* Todo: eventually check we are really dealing with on-board
1935 * video device ...
1936 */
1937
1938 if (pmac_mb.board_flags & PMAC_MB_MAY_SLEEP)
1939 pmac_mb.board_flags |= PMAC_MB_CAN_SLEEP;
1940 return 0;
1941}
1942
1943static long generic_get_mb_info(struct device_node *node, long param, long value)
1944{
1945 switch(param) {
1946 case PMAC_MB_INFO_MODEL:
1947 return pmac_mb.model_id;
1948 case PMAC_MB_INFO_FLAGS:
1949 return pmac_mb.board_flags;
1950 case PMAC_MB_INFO_NAME:
1951 /* hack hack hack... but should work */
1952 *((const char **)value) = pmac_mb.model_name;
1953 return 0;
1954 }
1955 return -EINVAL;
1956}
1957
1958
1959/*
1960 * Table definitions
1961 */
1962
1963/* Used on any machine
1964 */
static struct feature_table_entry any_features[] = {
	{ PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
	{ PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
	{ 0, NULL }	/* terminator */
};
1970
1971#ifndef CONFIG_POWER4
1972
1973/* OHare based motherboards. Currently, we only use these on the
1974 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
1975 * to have issues with turning on/off those asic cells
1976 */
static struct feature_table_entry ohare_features[] = {
	{ PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
	{ PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE, ohare_ide_enable},
	{ PMAC_FTR_IDE_RESET, ohare_ide_reset},
	{ PMAC_FTR_SLEEP_STATE, ohare_sleep_state },
	{ 0, NULL }	/* terminator */
};
1986
1987/* Heathrow desktop machines (Beige G3).
1988 * Separated as some features couldn't be properly tested
1989 * and the serial port control bits appear to confuse it.
1990 */
static struct feature_table_entry heathrow_desktop_features[] = {
	{ PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET, heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
	{ 0, NULL }	/* terminator; note: no SCC/sleep entries here */
};
1999
2000/* Heathrow based laptop, that is the Wallstreet and mainstreet
2001 * powerbooks.
2002 */
static struct feature_table_entry heathrow_laptop_features[] = {
	{ PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET, heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
	{ 0, NULL }	/* terminator */
};
2015
2016/* Paddington based machines
2017 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
2018 */
static struct feature_table_entry paddington_features[] = {
	{ PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
	{ PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
	{ PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
	{ PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
	{ PMAC_FTR_IDE_RESET, heathrow_ide_reset },
	{ PMAC_FTR_BMAC_ENABLE, heathrow_bmac_enable },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, heathrow_sound_enable },
	{ PMAC_FTR_SLEEP_STATE, heathrow_sleep_state },
	{ 0, NULL }	/* terminator */
};
2031
2032/* Core99 & MacRISC 2 machines (all machines released since the
2033 * iBook (included), that is all AGP machines, except pangea
2034 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
2035 * used on iBook2 & iMac "flow power".
2036 */
static struct feature_table_entry core99_features[] = {
	{ PMAC_FTR_SCC_ENABLE, core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
	{ PMAC_FTR_IDE_ENABLE, core99_ide_enable },
	{ PMAC_FTR_IDE_RESET, core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE, core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE, core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE, core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU, core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO, core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO, core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2057
2058/* RackMac
2059 */
static struct feature_table_entry rackmac_features[] = {
	{ PMAC_FTR_SCC_ENABLE, core99_scc_enable },
	{ PMAC_FTR_IDE_ENABLE, core99_ide_enable },
	{ PMAC_FTR_IDE_RESET, core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
	{ PMAC_FTR_USB_ENABLE, core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE, core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE, core99_sleep_state },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU, core99_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO, core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO, core99_write_gpio },
	{ 0, NULL }	/* terminator; no modem/sound/airport on XServe */
};
2077
2078/* Pangea features
2079 */
static struct feature_table_entry pangea_features[] = {
	{ PMAC_FTR_SCC_ENABLE, core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE, core99_ide_enable },
	{ PMAC_FTR_IDE_RESET, core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE, core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE, core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE, core99_sleep_state },
	{ PMAC_FTR_READ_GPIO, core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO, core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2097
2098/* Intrepid features
2099 */
static struct feature_table_entry intrepid_features[] = {
	{ PMAC_FTR_SCC_ENABLE, core99_scc_enable },
	{ PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
	{ PMAC_FTR_IDE_ENABLE, core99_ide_enable },
	{ PMAC_FTR_IDE_RESET, core99_ide_reset },
	{ PMAC_FTR_GMAC_ENABLE, core99_gmac_enable },
	{ PMAC_FTR_GMAC_PHY_RESET, core99_gmac_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, core99_sound_chip_enable },
	{ PMAC_FTR_AIRPORT_ENABLE, core99_airport_enable },
	{ PMAC_FTR_USB_ENABLE, core99_usb_enable },
	{ PMAC_FTR_1394_ENABLE, core99_firewire_enable },
	{ PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power },
	{ PMAC_FTR_SLEEP_STATE, core99_sleep_state },
	{ PMAC_FTR_READ_GPIO, core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO, core99_write_gpio },
	{ PMAC_FTR_AACK_DELAY_ENABLE, intrepid_aack_delay_enable },
	{ 0, NULL }	/* terminator */
};
2118
2119#else /* CONFIG_POWER4 */
2120
2121/* G5 features
2122 */
static struct feature_table_entry g5_features[] = {
	{ PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
	{ PMAC_FTR_1394_ENABLE, g5_fw_enable },
	{ PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
	{ PMAC_FTR_GMAC_PHY_RESET, g5_eth_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE, g5_i2s_enable },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU, g5_reset_cpu },
#endif /* CONFIG_SMP */
	{ PMAC_FTR_READ_GPIO, core99_read_gpio },
	{ PMAC_FTR_WRITE_GPIO, core99_write_gpio },
	{ 0, NULL }	/* terminator */
};
2136
2137#endif /* CONFIG_POWER4 */
2138
/* Table of known motherboards: OF model string, human-readable name,
 * model id, feature table, and board flags.  probe_motherboard()
 * matches the device-tree "model" property (then "compatible") against
 * the model_string of each entry.
 */
static struct pmac_mb_def pmac_mb_defs[] = {
#ifndef CONFIG_POWER4
	/*
	 * Desktops
	 */

	{ "AAPL,8500", "PowerMac 8500/8600",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,9500", "PowerMac 9500/9600",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7200", "PowerMac 7200",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7300", "PowerMac 7200/7300",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,7500", "PowerMac 7500",
	  PMAC_TYPE_PSURGE, NULL,
	  0
	},
	{ "AAPL,ShinerESB", "Apple Network Server",
	  PMAC_TYPE_ANS, NULL,
	  0
	},
	{ "AAPL,e407", "Alchemy",
	  PMAC_TYPE_ALCHEMY, NULL,
	  0
	},
	{ "AAPL,e411", "Gazelle",
	  PMAC_TYPE_GAZELLE, NULL,
	  0
	},
	{ "AAPL,Gossamer", "PowerMac G3 (Gossamer)",
	  PMAC_TYPE_GOSSAMER, heathrow_desktop_features,
	  0
	},
	{ "AAPL,PowerMac G3", "PowerMac G3 (Silk)",
	  PMAC_TYPE_SILK, heathrow_desktop_features,
	  0
	},
	{ "PowerMac1,1", "Blue&White G3",
	  PMAC_TYPE_YOSEMITE, paddington_features,
	  0
	},
	{ "PowerMac1,2", "PowerMac G4 PCI Graphics",
	  PMAC_TYPE_YIKES, paddington_features,
	  0
	},
	{ "PowerMac2,1", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac2,2", "iMac FireWire",
	  PMAC_TYPE_FW_IMAC, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,1", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,2", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,3", "PowerMac G4 AGP Graphics",
	  PMAC_TYPE_SAWTOOTH, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac3,4", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac3,5", "PowerMac G4 Silver",
	  PMAC_TYPE_QUICKSILVER, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac3,6", "PowerMac G4 Windtunnel",
	  PMAC_TYPE_WINDTUNNEL, core99_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac4,1", "iMac \"Flower Power\"",
	  PMAC_TYPE_PANGEA_IMAC, pangea_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac4,2", "Flat panel iMac",
	  PMAC_TYPE_FLAT_PANEL_IMAC, pangea_features,
	  PMAC_MB_CAN_SLEEP
	},
	{ "PowerMac4,4", "eMac",
	  PMAC_TYPE_EMAC, core99_features,
	  PMAC_MB_MAY_SLEEP
	},
	{ "PowerMac5,1", "PowerMac G4 Cube",
	  PMAC_TYPE_CUBE, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_OLD_CORE99
	},
	{ "PowerMac6,1", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac6,3", "Flat panel iMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac6,4", "eMac",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP,
	},
	{ "PowerMac10,1", "Mac mini",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
	},
	{ "iMac,1", "iMac (first generation)",
	  PMAC_TYPE_ORIG_IMAC, paddington_features,
	  0
	},

	/*
	 * Xserve's
	 */

	{ "RackMac1,1", "XServe",
	  PMAC_TYPE_RACKMAC, rackmac_features,
	  0,
	},
	{ "RackMac1,2", "XServe rev. 2",
	  PMAC_TYPE_RACKMAC, rackmac_features,
	  0,
	},

	/*
	 * Laptops
	 */

	{ "AAPL,3400/2400", "PowerBook 3400",
	  PMAC_TYPE_HOOPER, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "AAPL,3500", "PowerBook 3500",
	  PMAC_TYPE_KANGA, ohare_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "AAPL,PowerBook1998", "PowerBook Wallstreet",
	  PMAC_TYPE_WALLSTREET, heathrow_laptop_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "PowerBook1,1", "PowerBook 101 (Lombard)",
	  PMAC_TYPE_101_PBOOK, paddington_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_MOBILE
	},
	{ "PowerBook2,1", "iBook (first generation)",
	  PMAC_TYPE_ORIG_IBOOK, core99_features,
	  PMAC_MB_CAN_SLEEP | PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook2,2", "iBook FireWire",
	  PMAC_TYPE_FW_IBOOK, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook3,1", "PowerBook Pismo",
	  PMAC_TYPE_PISMO, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER |
	  PMAC_MB_OLD_CORE99 | PMAC_MB_MOBILE
	},
	{ "PowerBook3,2", "PowerBook Titanium",
	  PMAC_TYPE_TITANIUM, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,3", "PowerBook Titanium II",
	  PMAC_TYPE_TITANIUM2, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,4", "PowerBook Titanium III",
	  PMAC_TYPE_TITANIUM3, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook3,5", "PowerBook Titanium IV",
	  PMAC_TYPE_TITANIUM4, core99_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,1", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,2", "iBook 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook4,3", "iBook 2 rev. 2",
	  PMAC_TYPE_IBOOK2, pangea_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE
	},
	{ "PowerBook5,1", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,2", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,3", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,4", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,5", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,6", "PowerBook G4 15\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook5,7", "PowerBook G4 17\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,1", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,2", "PowerBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,3", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,4", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,5", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,7", "iBook G4",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
	{ "PowerBook6,8", "PowerBook G4 12\"",
	  PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
	  PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
	},
#else /* CONFIG_POWER4 */
	{ "PowerMac7,2", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
#ifdef CONFIG_PPC64
	{ "PowerMac7,3", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac8,1", "iMac G5",
	  PMAC_TYPE_IMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac9,1", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
	  0,
	},
	{ "RackMac3,1", "XServe G5",
	  PMAC_TYPE_XSERVE_G5, g5_features,
	  0,
	},
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_POWER4 */
};
2418
2419/*
2420 * The toplevel feature_call callback
2421 */
2422long pmac_do_feature_call(unsigned int selector, ...)
2423{
2424 struct device_node *node;
2425 long param, value;
2426 int i;
2427 feature_call func = NULL;
2428 va_list args;
2429
2430 if (pmac_mb.features)
2431 for (i=0; pmac_mb.features[i].function; i++)
2432 if (pmac_mb.features[i].selector == selector) {
2433 func = pmac_mb.features[i].function;
2434 break;
2435 }
2436 if (!func)
2437 for (i=0; any_features[i].function; i++)
2438 if (any_features[i].selector == selector) {
2439 func = any_features[i].function;
2440 break;
2441 }
2442 if (!func)
2443 return -ENODEV;
2444
2445 va_start(args, selector);
2446 node = (struct device_node*)va_arg(args, void*);
2447 param = va_arg(args, long);
2448 value = va_arg(args, long);
2449 va_end(args);
2450
2451 return func(node, param, value);
2452}
2453
/* Identify the motherboard and fill in the global pmac_mb descriptor.
 * Returns 0 on success, -ENODEV if the machine cannot be identified
 * even from the mac-io chip type.
 */
static int __init probe_motherboard(void)
{
	int i;
	struct macio_chip *macio = &macio_chips[0];
	const char *model = NULL;
	struct device_node *dt;

	/* Lookup known motherboard type in device-tree. First try an
	 * exact match on the "model" property, then try a "compatible"
	 * match if none is found.
	 */
	dt = find_devices("device-tree");
	if (dt != NULL)
		model = (const char *) get_property(dt, "model", NULL);
	for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
	    if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
		pmac_mb = pmac_mb_defs[i];
		goto found;
	    }
	}
	for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
	    if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
		pmac_mb = pmac_mb_defs[i];
		goto found;
	    }
	}

	/* Fallback to selection depending on mac-io chip type */
	switch(macio->type) {
#ifndef CONFIG_POWER4
	case macio_grand_central:
		pmac_mb.model_id = PMAC_TYPE_PSURGE;
		pmac_mb.model_name = "Unknown PowerSurge";
		break;
	case macio_ohare:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_OHARE;
		pmac_mb.model_name = "Unknown OHare-based";
		break;
	case macio_heathrow:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_HEATHROW;
		pmac_mb.model_name = "Unknown Heathrow-based";
		pmac_mb.features = heathrow_desktop_features;
		break;
	case macio_paddington:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PADDINGTON;
		pmac_mb.model_name = "Unknown Paddington-based";
		pmac_mb.features = paddington_features;
		break;
	case macio_keylargo:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_CORE99;
		pmac_mb.model_name = "Unknown Keylargo-based";
		pmac_mb.features = core99_features;
		break;
	case macio_pangea:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_PANGEA;
		pmac_mb.model_name = "Unknown Pangea-based";
		pmac_mb.features = pangea_features;
		break;
	case macio_intrepid:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_INTREPID;
		pmac_mb.model_name = "Unknown Intrepid-based";
		pmac_mb.features = intrepid_features;
		break;
#else /* CONFIG_POWER4 */
	case macio_keylargo2:
		pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
		pmac_mb.model_name = "Unknown K2-based";
		pmac_mb.features = g5_features;
		break;
#endif /* CONFIG_POWER4 */
	default:
		return -ENODEV;
	}
found:
#ifndef CONFIG_POWER4
	/* Fixup Hooper vs. Comet: both report as PowerBook 3400, a
	 * machine-id register bit tells them apart.
	 */
	if (pmac_mb.model_id == PMAC_TYPE_HOOPER) {
		u32 __iomem * mach_id_ptr = ioremap(0xf3000034, 4);
		if (!mach_id_ptr)
			return -ENODEV;
		/* Here, I used to disable the media-bay on comet. It
		 * appears this is wrong, the floppy connector is actually
		 * a kind of media-bay and works with the current driver.
		 */
		if (__raw_readl(mach_id_ptr) & 0x20000000UL)
			pmac_mb.model_id = PMAC_TYPE_COMET;
		iounmap(mach_id_ptr);
	}
#endif /* CONFIG_POWER4 */

#ifdef CONFIG_6xx
	/* Set default value of powersave_nap on machines that support it.
	 * It appears that uninorth rev 3 has a problem with it, we don't
	 * enable it on those. In theory, the flush-on-lock property is
	 * supposed to be set when not supported, but I'm not very confident
	 * that all Apple OF revs did it properly, I do it the paranoid way.
	 *
	 * NOTE: this "while" is only used as a breakable block, it runs
	 * at most once.
	 */
	while (uninorth_base && uninorth_rev > 3) {
		struct device_node *np = find_path_device("/cpus");
		if (!np || !np->child) {
			printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
			break;
		}
		np = np->child;
		/* Nap mode not supported on SMP */
		if (np->sibling)
			break;
		/* Nap mode not supported if flush-on-lock property is present */
		if (get_property(np, "flush-on-lock", NULL))
			break;
		powersave_nap = 1;
		printk(KERN_INFO "Processor NAP mode on idle enabled.\n");
		break;
	}

	/* On CPUs that support it (750FX), lowspeed by default during
	 * NAP mode
	 */
	powersave_lowspeed = 1;
#endif /* CONFIG_6xx */
#ifdef CONFIG_POWER4
	powersave_nap = 1;
#endif
	/* Check for "mobile" machine */
	if (model && (strncmp(model, "PowerBook", 9) == 0
		   || strncmp(model, "iBook", 5) == 0))
		pmac_mb.board_flags |= PMAC_MB_MOBILE;


	printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
	return 0;
}
2586
2587/* Initialize the Core99 UniNorth host bridge and memory controller
2588 */
2589static void __init probe_uninorth(void)
2590{
2591 unsigned long actrl;
2592
2593 /* Locate core99 Uni-N */
2594 uninorth_node = of_find_node_by_name(NULL, "uni-n");
2595 /* Locate G5 u3 */
2596 if (uninorth_node == NULL) {
2597 uninorth_node = of_find_node_by_name(NULL, "u3");
2598 uninorth_u3 = 1;
2599 }
2600 if (uninorth_node && uninorth_node->n_addrs > 0) {
2601 unsigned long address = uninorth_node->addrs[0].address;
2602 uninorth_base = ioremap(address, 0x40000);
2603 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
2604 if (uninorth_u3)
2605 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
2606 } else
2607 uninorth_node = NULL;
2608
2609 if (!uninorth_node)
2610 return;
2611
2612 printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n",
2613 uninorth_u3 ? "U3" : "UniNorth", uninorth_rev);
2614 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
2615
2616 /* Set the arbitrer QAck delay according to what Apple does
2617 */
2618 if (uninorth_rev < 0x11) {
2619 actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK;
2620 actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 :
2621 UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT;
2622 UN_OUT(UNI_N_ARB_CTRL, actrl);
2623 }
2624
2625 /* Some more magic as done by them in recent MacOS X on UniNorth
2626 * revs 1.5 to 2.O and Pangea. Seem to toggle the UniN Maxbus/PCI
2627 * memory timeout
2628 */
2629 if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0)
2630 UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff);
2631}
2632
2633static void __init probe_one_macio(const char *name, const char *compat, int type)
2634{
2635 struct device_node* node;
2636 int i;
2637 volatile u32 __iomem * base;
2638 u32* revp;
2639
2640 node = find_devices(name);
2641 if (!node || !node->n_addrs)
2642 return;
2643 if (compat)
2644 do {
2645 if (device_is_compatible(node, compat))
2646 break;
2647 node = node->next;
2648 } while (node);
2649 if (!node)
2650 return;
2651 for(i=0; i<MAX_MACIO_CHIPS; i++) {
2652 if (!macio_chips[i].of_node)
2653 break;
2654 if (macio_chips[i].of_node == node)
2655 return;
2656 }
2657 if (i >= MAX_MACIO_CHIPS) {
2658 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
2659 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
2660 return;
2661 }
2662 base = ioremap(node->addrs[0].address, node->addrs[0].size);
2663 if (!base) {
2664 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
2665 return;
2666 }
2667 if (type == macio_keylargo) {
2668 u32 *did = (u32 *)get_property(node, "device-id", NULL);
2669 if (*did == 0x00000025)
2670 type = macio_pangea;
2671 if (*did == 0x0000003e)
2672 type = macio_intrepid;
2673 }
2674 macio_chips[i].of_node = node;
2675 macio_chips[i].type = type;
2676 macio_chips[i].base = base;
2677 macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON;
2678 macio_chips[i].name = macio_names[type];
2679 revp = (u32 *)get_property(node, "revision-id", NULL);
2680 if (revp)
2681 macio_chips[i].rev = *revp;
2682 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
2683 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
2684}
2685
2686static int __init
2687probe_macios(void)
2688{
2689 /* Warning, ordering is important */
2690 probe_one_macio("gc", NULL, macio_grand_central);
2691 probe_one_macio("ohare", NULL, macio_ohare);
2692 probe_one_macio("pci106b,7", NULL, macio_ohareII);
2693 probe_one_macio("mac-io", "keylargo", macio_keylargo);
2694 probe_one_macio("mac-io", "paddington", macio_paddington);
2695 probe_one_macio("mac-io", "gatwick", macio_gatwick);
2696 probe_one_macio("mac-io", "heathrow", macio_heathrow);
2697 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
2698
2699 /* Make sure the "main" macio chip appear first */
2700 if (macio_chips[0].type == macio_gatwick
2701 && macio_chips[1].type == macio_heathrow) {
2702 struct macio_chip temp = macio_chips[0];
2703 macio_chips[0] = macio_chips[1];
2704 macio_chips[1] = temp;
2705 }
2706 if (macio_chips[0].type == macio_ohareII
2707 && macio_chips[1].type == macio_ohare) {
2708 struct macio_chip temp = macio_chips[0];
2709 macio_chips[0] = macio_chips[1];
2710 macio_chips[1] = temp;
2711 }
2712 macio_chips[0].lbus.index = 0;
2713 macio_chips[1].lbus.index = 1;
2714
2715 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
2716}
2717
2718static void __init
2719initial_serial_shutdown(struct device_node *np)
2720{
2721 int len;
2722 struct slot_names_prop {
2723 int count;
2724 char name[1];
2725 } *slots;
2726 char *conn;
2727 int port_type = PMAC_SCC_ASYNC;
2728 int modem = 0;
2729
2730 slots = (struct slot_names_prop *)get_property(np, "slot-names", &len);
2731 conn = get_property(np, "AAPL,connector", &len);
2732 if (conn && (strcmp(conn, "infrared") == 0))
2733 port_type = PMAC_SCC_IRDA;
2734 else if (device_is_compatible(np, "cobalt"))
2735 modem = 1;
2736 else if (slots && slots->count > 0) {
2737 if (strcmp(slots->name, "IrDA") == 0)
2738 port_type = PMAC_SCC_IRDA;
2739 else if (strcmp(slots->name, "Modem") == 0)
2740 modem = 1;
2741 }
2742 if (modem)
2743 pmac_call_feature(PMAC_FTR_MODEM_ENABLE, np, 0, 0);
2744 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np, port_type, 0);
2745}
2746
/* Set the initial state of a number of on-board devices at boot:
 * enables the cells PCI probing needs to see (GMAC, FireWire, ATA-100),
 * switches off AirPort, sound and the serial ports, and applies a few
 * machine-specific workarounds. Which branches run depends on the mac-io
 * chip type detected by probe_macios() and on CONFIG_POWER4.
 */
static void __init
set_initial_features(void)
{
	struct device_node *np;

	/* That hack appears to be necessary for some StarMax motherboards
	 * but I'm not too sure it was audited for side-effects on other
	 * ohare based machines...
	 * Since I still have difficulties figuring the right way to
	 * differentiate them all and since that hack was there for a long
	 * time, I'll keep it around
	 */
	if (macio_chips[0].type == macio_ohare && !find_devices("via-pmu")) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_OUT32(OHARE_FCR, STARMAX_FEATURES);
	} else if (macio_chips[0].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	} else if (macio_chips[1].type == macio_ohare) {
		struct macio_chip *macio = &macio_chips[1];
		MACIO_BIS(OHARE_FCR, OH_IOBUS_ENABLE);
	}

#ifdef CONFIG_POWER4
	if (macio_chips[0].type == macio_keylargo2) {
#ifndef CONFIG_SMP
		/* On SMP machines running UP, we have the second CPU eating
		 * bus cycles. We need to take it off the bus. This is done
		 * from pmac_smp for SMP kernels running on one CPU
		 */
		np = of_find_node_by_type(NULL, "cpu");
		if (np != NULL)
			np = of_find_node_by_type(np, "cpu");
		if (np != NULL) {
			/* A second "cpu" node exists: disable CPU 1 */
			g5_phy_disable_cpu1();
			of_node_put(np);
		}
#endif /* CONFIG_SMP */
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			if (device_is_compatible(np, "K2-GMAC"))
				g5_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (device_is_compatible(np, "pci106b,5811")) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				g5_fw_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}
	}
#else /* CONFIG_POWER4 */

	if (macio_chips[0].type == macio_keylargo ||
	    macio_chips[0].type == macio_pangea ||
	    macio_chips[0].type == macio_intrepid) {
		/* Enable GMAC for now for PCI probing. It will be disabled
		 * later on after PCI probe
		 */
		np = of_find_node_by_name(NULL, "ethernet");
		while(np) {
			/* Only touch gmac cells that sit on the uni-north
			 * internal bus, not external PCI ones
			 */
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "gmac"))
				core99_gmac_enable(np, 0, 1);
			np = of_find_node_by_name(np, "ethernet");
		}

		/* Enable FW before PCI probe. Will be disabled later on
		 * Note: We should have a better way to check that we are
		 * dealing with uninorth internal cell and not a PCI cell
		 * on the external PCI. The code below works though.
		 */
		np = of_find_node_by_name(NULL, "firewire");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && (device_is_compatible(np, "pci106b,18") ||
			        device_is_compatible(np, "pci106b,30") ||
			        device_is_compatible(np, "pci11c1,5811"))) {
				macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
				core99_firewire_enable(np, 0, 1);
			}
			np = of_find_node_by_name(np, "firewire");
		}

		/* Enable ATA-100 before PCI probe. */
		np = of_find_node_by_name(NULL, "ata-6");
		while(np) {
			if (np->parent
			    && device_is_compatible(np->parent, "uni-north")
			    && device_is_compatible(np, "kauai-ata")) {
				core99_ata100_enable(np, 1);
			}
			np = of_find_node_by_name(np, "ata-6");
		}

		/* Switch airport off */
		np = find_devices("radio");
		while(np) {
			if (np && np->parent == macio_chips[0].of_node) {
				macio_chips[0].flags |= MACIO_FLAG_AIRPORT_ON;
				core99_airport_enable(np, 0, 0);
			}
			np = np->next;
		}
	}

	/* On all machines that support sound PM, switch sound off */
	if (macio_chips[0].of_node)
		pmac_do_feature_call(PMAC_FTR_SOUND_CHIP_ENABLE,
				     macio_chips[0].of_node, 0, 0);

	/* While on some desktop G3s, we turn it back on */
	if (macio_chips[0].of_node && macio_chips[0].type == macio_heathrow
	    && (pmac_mb.model_id == PMAC_TYPE_GOSSAMER ||
		pmac_mb.model_id == PMAC_TYPE_SILK)) {
		struct macio_chip *macio = &macio_chips[0];
		MACIO_BIS(HEATHROW_FCR, HRW_SOUND_CLK_ENABLE);
		MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N);
	}

	/* Some machine models need the clock chip to be properly setup for
	 * clock spreading now. This should be a platform function but we
	 * don't do these at the moment
	 */
	pmac_tweak_clock_spreading(1);

#endif /* CONFIG_POWER4 */

	/* On all machines, switch modem & serial ports off */
	np = find_devices("ch-a");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
	np = find_devices("ch-b");
	while(np) {
		initial_serial_shutdown(np);
		np = np->next;
	}
}
2900
2901void __init
2902pmac_feature_init(void)
2903{
2904 /* Detect the UniNorth memory controller */
2905 probe_uninorth();
2906
2907 /* Probe mac-io controllers */
2908 if (probe_macios()) {
2909 printk(KERN_WARNING "No mac-io chip found\n");
2910 return;
2911 }
2912
2913 /* Setup low-level i2c stuffs */
2914 pmac_init_low_i2c();
2915
2916 /* Probe machine type */
2917 if (probe_motherboard())
2918 printk(KERN_WARNING "Unknown PowerMac !\n");
2919
2920 /* Set some initial features (turn off some chips that will
2921 * be later turned on)
2922 */
2923 set_initial_features();
2924}
2925
/* Late initcall for the feature layer. The resource reservations it used
 * to perform are currently compiled out (#if 0), so this is a no-op kept
 * as a placeholder; it always returns 0.
 */
int __init pmac_feature_late_init(void)
{
#if 0
	struct device_node *np;

	/* Request some resources late */
	if (uninorth_node)
		request_OF_resource(uninorth_node, 0, NULL);
	np = find_devices("hammerhead");
	if (np)
		request_OF_resource(np, 0, NULL);
	np = find_devices("interrupt-controller");
	if (np)
		request_OF_resource(np, 0, NULL);
#endif
	return 0;
}

/* Run pmac_feature_late_init() at device-initcall time */
device_initcall(pmac_feature_late_init);
2945
/* Disabled (#if 0) HyperTransport link diagnostics for the G5 U3 bridge:
 * dump_HT_speeds() decodes and prints an HT link's frequency and in/out
 * widths from its config/frequency registers; pmac_check_ht_link() reads
 * the U3 HT link and the PCI-X bridge's up/down links and prints all
 * three. Kept around for debugging.
 */
#if 0
static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
{
	/* HT link frequency code -> MHz; 0 marks reserved encodings */
	int	freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
	/* HT link width code -> bits */
	int	bits[8] = { 8,16,0,32,2,4,0,0 };
	int	freq = (frq >> 8) & 0xf;

	if (freqs[freq] == 0)
		printk("%s: Unknown HT link frequency %x\n", name, freq);
	else
		printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
		       name, freqs[freq],
		       bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
}

void __init pmac_check_ht_link(void)
{
	u32	ufreq, freq, ucfg, cfg;
	struct device_node *pcix_node;
	u8	px_bus, px_devfn;
	struct pci_controller *px_hose;

	/* Dummy read flushes posted writes before sampling the registers */
	(void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
	ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
	ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
	dump_HT_speeds("U3 HyperTransport", cfg, freq);

	pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
	if (pcix_node == NULL) {
		printk("No PCI-X bridge found\n");
		return;
	}
	if (pci_device_from_OF_node(pcix_node, &px_bus, &px_devfn) != 0) {
		printk("PCI-X bridge found but not matched to pci\n");
		return;
	}
	px_hose = pci_find_hose_for_OF_device(pcix_node);
	if (px_hose == NULL) {
		printk("PCI-X bridge found but not matched to host\n");
		return;
	}
	/* Uplink: config reg at 0xc4, frequency reg at 0xcc */
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
	dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
	/* Downlink: config reg at 0xc8, frequency reg at 0xd0 */
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
	dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
}
#endif /* 0 */
2995
2996/*
2997 * Early video resume hook
2998 */
2999
3000static void (*pmac_early_vresume_proc)(void *data);
3001static void *pmac_early_vresume_data;
3002
3003void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
3004{
3005 if (_machine != _MACH_Pmac)
3006 return;
3007 preempt_disable();
3008 pmac_early_vresume_proc = proc;
3009 pmac_early_vresume_data = data;
3010 preempt_enable();
3011}
3012EXPORT_SYMBOL(pmac_set_early_video_resume);
3013
3014void pmac_call_early_video_resume(void)
3015{
3016 if (pmac_early_vresume_proc)
3017 pmac_early_vresume_proc(pmac_early_vresume_data);
3018}
3019
3020/*
3021 * AGP related suspend/resume code
3022 */
3023
3024static struct pci_dev *pmac_agp_bridge;
3025static int (*pmac_agp_suspend)(struct pci_dev *bridge);
3026static int (*pmac_agp_resume)(struct pci_dev *bridge);
3027
3028void pmac_register_agp_pm(struct pci_dev *bridge,
3029 int (*suspend)(struct pci_dev *bridge),
3030 int (*resume)(struct pci_dev *bridge))
3031{
3032 if (suspend || resume) {
3033 pmac_agp_bridge = bridge;
3034 pmac_agp_suspend = suspend;
3035 pmac_agp_resume = resume;
3036 return;
3037 }
3038 if (bridge != pmac_agp_bridge)
3039 return;
3040 pmac_agp_suspend = pmac_agp_resume = NULL;
3041 return;
3042}
3043EXPORT_SYMBOL(pmac_register_agp_pm);
3044
3045void pmac_suspend_agp_for_card(struct pci_dev *dev)
3046{
3047 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
3048 return;
3049 if (pmac_agp_bridge->bus != dev->bus)
3050 return;
3051 pmac_agp_suspend(pmac_agp_bridge);
3052}
3053EXPORT_SYMBOL(pmac_suspend_agp_for_card);
3054
3055void pmac_resume_agp_for_card(struct pci_dev *dev)
3056{
3057 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3058 return;
3059 if (pmac_agp_bridge->bus != dev->bus)
3060 return;
3061 pmac_agp_resume(pmac_agp_bridge);
3062}
3063EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f3f39e8e337a..f3f39e8e337a 100644
--- a/arch/ppc64/kernel/pmac_low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
diff --git a/arch/ppc64/kernel/pmac_nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index e32a902236e3..4042e2f06ee0 100644
--- a/arch/ppc64/kernel/pmac_nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -15,10 +15,13 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/stddef.h> 16#include <linux/stddef.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/nvram.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/adb.h>
24#include <linux/pmu.h>
22#include <linux/bootmem.h> 25#include <linux/bootmem.h>
23#include <linux/completion.h> 26#include <linux/completion.h>
24#include <linux/spinlock.h> 27#include <linux/spinlock.h>
@@ -72,20 +75,38 @@ struct core99_header {
72/* 75/*
73 * Read and write the non-volatile RAM on PowerMacs and CHRP machines. 76 * Read and write the non-volatile RAM on PowerMacs and CHRP machines.
74 */ 77 */
78static int nvram_naddrs;
75static volatile unsigned char *nvram_data; 79static volatile unsigned char *nvram_data;
80static int is_core_99;
76static int core99_bank = 0; 81static int core99_bank = 0;
82static int nvram_partitions[3];
77// XXX Turn that into a sem 83// XXX Turn that into a sem
78static DEFINE_SPINLOCK(nv_lock); 84static DEFINE_SPINLOCK(nv_lock);
79 85
86extern int pmac_newworld;
80extern int system_running; 87extern int system_running;
81 88
82static int (*core99_write_bank)(int bank, u8* datas); 89static int (*core99_write_bank)(int bank, u8* datas);
83static int (*core99_erase_bank)(int bank); 90static int (*core99_erase_bank)(int bank);
84 91
85static char *nvram_image __pmacdata; 92static char *nvram_image;
86 93
87 94
88static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index) 95static unsigned char core99_nvram_read_byte(int addr)
96{
97 if (nvram_image == NULL)
98 return 0xff;
99 return nvram_image[addr];
100}
101
102static void core99_nvram_write_byte(int addr, unsigned char val)
103{
104 if (nvram_image == NULL)
105 return;
106 nvram_image[addr] = val;
107}
108
109static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
89{ 110{
90 int i; 111 int i;
91 112
@@ -103,7 +124,7 @@ static ssize_t __pmac core99_nvram_read(char *buf, size_t count, loff_t *index)
103 return count; 124 return count;
104} 125}
105 126
106static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index) 127static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
107{ 128{
108 int i; 129 int i;
109 130
@@ -121,14 +142,95 @@ static ssize_t __pmac core99_nvram_write(char *buf, size_t count, loff_t *index)
121 return count; 142 return count;
122} 143}
123 144
124static ssize_t __pmac core99_nvram_size(void) 145static ssize_t core99_nvram_size(void)
125{ 146{
126 if (nvram_image == NULL) 147 if (nvram_image == NULL)
127 return -ENODEV; 148 return -ENODEV;
128 return NVRAM_SIZE; 149 return NVRAM_SIZE;
129} 150}
130 151
131static u8 __pmac chrp_checksum(struct chrp_header* hdr) 152#ifdef CONFIG_PPC32
153static volatile unsigned char *nvram_addr;
154static int nvram_mult;
155
156static unsigned char direct_nvram_read_byte(int addr)
157{
158 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
159}
160
161static void direct_nvram_write_byte(int addr, unsigned char val)
162{
163 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
164}
165
166
167static unsigned char indirect_nvram_read_byte(int addr)
168{
169 unsigned char val;
170 unsigned long flags;
171
172 spin_lock_irqsave(&nv_lock, flags);
173 out_8(nvram_addr, addr >> 5);
174 val = in_8(&nvram_data[(addr & 0x1f) << 4]);
175 spin_unlock_irqrestore(&nv_lock, flags);
176
177 return val;
178}
179
180static void indirect_nvram_write_byte(int addr, unsigned char val)
181{
182 unsigned long flags;
183
184 spin_lock_irqsave(&nv_lock, flags);
185 out_8(nvram_addr, addr >> 5);
186 out_8(&nvram_data[(addr & 0x1f) << 4], val);
187 spin_unlock_irqrestore(&nv_lock, flags);
188}
189
190
191#ifdef CONFIG_ADB_PMU
192
193static void pmu_nvram_complete(struct adb_request *req)
194{
195 if (req->arg)
196 complete((struct completion *)req->arg);
197}
198
199static unsigned char pmu_nvram_read_byte(int addr)
200{
201 struct adb_request req;
202 DECLARE_COMPLETION(req_complete);
203
204 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
205 if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
206 (addr >> 8) & 0xff, addr & 0xff))
207 return 0xff;
208 if (system_state == SYSTEM_RUNNING)
209 wait_for_completion(&req_complete);
210 while (!req.complete)
211 pmu_poll();
212 return req.reply[0];
213}
214
215static void pmu_nvram_write_byte(int addr, unsigned char val)
216{
217 struct adb_request req;
218 DECLARE_COMPLETION(req_complete);
219
220 req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
221 if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
222 (addr >> 8) & 0xff, addr & 0xff, val))
223 return;
224 if (system_state == SYSTEM_RUNNING)
225 wait_for_completion(&req_complete);
226 while (!req.complete)
227 pmu_poll();
228}
229
230#endif /* CONFIG_ADB_PMU */
231#endif /* CONFIG_PPC32 */
232
233static u8 chrp_checksum(struct chrp_header* hdr)
132{ 234{
133 u8 *ptr; 235 u8 *ptr;
134 u16 sum = hdr->signature; 236 u16 sum = hdr->signature;
@@ -139,7 +241,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
139 return sum; 241 return sum;
140} 242}
141 243
142static u32 __pmac core99_calc_adler(u8 *buffer) 244static u32 core99_calc_adler(u8 *buffer)
143{ 245{
144 int cnt; 246 int cnt;
145 u32 low, high; 247 u32 low, high;
@@ -161,7 +263,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
161 return (high << 16) | low; 263 return (high << 16) | low;
162} 264}
163 265
164static u32 __pmac core99_check(u8* datas) 266static u32 core99_check(u8* datas)
165{ 267{
166 struct core99_header* hdr99 = (struct core99_header*)datas; 268 struct core99_header* hdr99 = (struct core99_header*)datas;
167 269
@@ -180,7 +282,7 @@ static u32 __pmac core99_check(u8* datas)
180 return hdr99->generation; 282 return hdr99->generation;
181} 283}
182 284
183static int __pmac sm_erase_bank(int bank) 285static int sm_erase_bank(int bank)
184{ 286{
185 int stat, i; 287 int stat, i;
186 unsigned long timeout; 288 unsigned long timeout;
@@ -194,7 +296,7 @@ static int __pmac sm_erase_bank(int bank)
194 timeout = 0; 296 timeout = 0;
195 do { 297 do {
196 if (++timeout > 1000000) { 298 if (++timeout > 1000000) {
197 printk(KERN_ERR "nvram: Sharp/Miron flash erase timeout !\n"); 299 printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
198 break; 300 break;
199 } 301 }
200 out_8(base, SM_FLASH_CMD_READ_STATUS); 302 out_8(base, SM_FLASH_CMD_READ_STATUS);
@@ -212,7 +314,7 @@ static int __pmac sm_erase_bank(int bank)
212 return 0; 314 return 0;
213} 315}
214 316
215static int __pmac sm_write_bank(int bank, u8* datas) 317static int sm_write_bank(int bank, u8* datas)
216{ 318{
217 int i, stat = 0; 319 int i, stat = 0;
218 unsigned long timeout; 320 unsigned long timeout;
@@ -247,7 +349,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
247 return 0; 349 return 0;
248} 350}
249 351
250static int __pmac amd_erase_bank(int bank) 352static int amd_erase_bank(int bank)
251{ 353{
252 int i, stat = 0; 354 int i, stat = 0;
253 unsigned long timeout; 355 unsigned long timeout;
@@ -294,7 +396,7 @@ static int __pmac amd_erase_bank(int bank)
294 return 0; 396 return 0;
295} 397}
296 398
297static int __pmac amd_write_bank(int bank, u8* datas) 399static int amd_write_bank(int bank, u8* datas)
298{ 400{
299 int i, stat = 0; 401 int i, stat = 0;
300 unsigned long timeout; 402 unsigned long timeout;
@@ -340,12 +442,49 @@ static int __pmac amd_write_bank(int bank, u8* datas)
340 return 0; 442 return 0;
341} 443}
342 444
445static void __init lookup_partitions(void)
446{
447 u8 buffer[17];
448 int i, offset;
449 struct chrp_header* hdr;
450
451 if (pmac_newworld) {
452 nvram_partitions[pmac_nvram_OF] = -1;
453 nvram_partitions[pmac_nvram_XPRAM] = -1;
454 nvram_partitions[pmac_nvram_NR] = -1;
455 hdr = (struct chrp_header *)buffer;
456
457 offset = 0;
458 buffer[16] = 0;
459 do {
460 for (i=0;i<16;i++)
461 buffer[i] = ppc_md.nvram_read_val(offset+i);
462 if (!strcmp(hdr->name, "common"))
463 nvram_partitions[pmac_nvram_OF] = offset + 0x10;
464 if (!strcmp(hdr->name, "APL,MacOS75")) {
465 nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
466 nvram_partitions[pmac_nvram_NR] = offset + 0x110;
467 }
468 offset += (hdr->len * 0x10);
469 } while(offset < NVRAM_SIZE);
470 } else {
471 nvram_partitions[pmac_nvram_OF] = 0x1800;
472 nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
473 nvram_partitions[pmac_nvram_NR] = 0x1400;
474 }
475 DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
476 DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
477 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
478}
343 479
344static int __pmac core99_nvram_sync(void) 480static void core99_nvram_sync(void)
345{ 481{
346 struct core99_header* hdr99; 482 struct core99_header* hdr99;
347 unsigned long flags; 483 unsigned long flags;
348 484
485 if (!is_core_99 || !nvram_data || !nvram_image)
486 return;
487
349 spin_lock_irqsave(&nv_lock, flags); 488 spin_lock_irqsave(&nv_lock, flags);
350 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE, 489 if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
351 NVRAM_SIZE)) 490 NVRAM_SIZE))
@@ -370,32 +509,28 @@ static int __pmac core99_nvram_sync(void)
370 bail: 509 bail:
371 spin_unlock_irqrestore(&nv_lock, flags); 510 spin_unlock_irqrestore(&nv_lock, flags);
372 511
373 return 0; 512#ifdef DEBUG
513 mdelay(2000);
514#endif
374} 515}
375 516
376int __init pmac_nvram_init(void) 517static int __init core99_nvram_setup(struct device_node *dp)
377{ 518{
378 struct device_node *dp;
379 u32 gen_bank0, gen_bank1;
380 int i; 519 int i;
520 u32 gen_bank0, gen_bank1;
381 521
382 dp = find_devices("nvram"); 522 if (nvram_naddrs < 1) {
383 if (dp == NULL) { 523 printk(KERN_ERR "nvram: no address\n");
384 printk(KERN_ERR "Can't find NVRAM device\n"); 524 return -EINVAL;
385 return -ENODEV;
386 }
387 if (!device_is_compatible(dp, "nvram,flash")) {
388 printk(KERN_ERR "Incompatible type of NVRAM\n");
389 return -ENXIO;
390 } 525 }
391
392 nvram_image = alloc_bootmem(NVRAM_SIZE); 526 nvram_image = alloc_bootmem(NVRAM_SIZE);
393 if (nvram_image == NULL) { 527 if (nvram_image == NULL) {
394 printk(KERN_ERR "nvram: can't allocate ram image\n"); 528 printk(KERN_ERR "nvram: can't allocate ram image\n");
395 return -ENOMEM; 529 return -ENOMEM;
396 } 530 }
397 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2); 531 nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2);
398 532 nvram_naddrs = 1; /* Make sure we get the correct case */
533
399 DBG("nvram: Checking bank 0...\n"); 534 DBG("nvram: Checking bank 0...\n");
400 535
401 gen_bank0 = core99_check((u8 *)nvram_data); 536 gen_bank0 = core99_check((u8 *)nvram_data);
@@ -408,11 +543,12 @@ int __init pmac_nvram_init(void)
408 for (i=0; i<NVRAM_SIZE; i++) 543 for (i=0; i<NVRAM_SIZE; i++)
409 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE]; 544 nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
410 545
546 ppc_md.nvram_read_val = core99_nvram_read_byte;
547 ppc_md.nvram_write_val = core99_nvram_write_byte;
411 ppc_md.nvram_read = core99_nvram_read; 548 ppc_md.nvram_read = core99_nvram_read;
412 ppc_md.nvram_write = core99_nvram_write; 549 ppc_md.nvram_write = core99_nvram_write;
413 ppc_md.nvram_size = core99_nvram_size; 550 ppc_md.nvram_size = core99_nvram_size;
414 ppc_md.nvram_sync = core99_nvram_sync; 551 ppc_md.nvram_sync = core99_nvram_sync;
415
416 /* 552 /*
417 * Maybe we could be smarter here though making an exclusive list 553 * Maybe we could be smarter here though making an exclusive list
418 * of known flash chips is a bit nasty as older OF didn't provide us 554 * of known flash chips is a bit nasty as older OF didn't provide us
@@ -427,67 +563,81 @@ int __init pmac_nvram_init(void)
427 core99_erase_bank = sm_erase_bank; 563 core99_erase_bank = sm_erase_bank;
428 core99_write_bank = sm_write_bank; 564 core99_write_bank = sm_write_bank;
429 } 565 }
430
431 return 0; 566 return 0;
432} 567}
433 568
434int __pmac pmac_get_partition(int partition) 569int __init pmac_nvram_init(void)
435{ 570{
436 struct nvram_partition *part; 571 struct device_node *dp;
437 const char *name; 572 int err = 0;
438 int sig; 573
439 574 nvram_naddrs = 0;
440 switch(partition) { 575
441 case pmac_nvram_OF: 576 dp = find_devices("nvram");
442 name = "common"; 577 if (dp == NULL) {
443 sig = NVRAM_SIG_SYS; 578 printk(KERN_ERR "Can't find NVRAM device\n");
444 break;
445 case pmac_nvram_XPRAM:
446 name = "APL,MacOS75";
447 sig = NVRAM_SIG_OS;
448 break;
449 case pmac_nvram_NR:
450 default:
451 /* Oldworld stuff */
452 return -ENODEV; 579 return -ENODEV;
453 } 580 }
581 nvram_naddrs = dp->n_addrs;
582 is_core_99 = device_is_compatible(dp, "nvram,flash");
583 if (is_core_99)
584 err = core99_nvram_setup(dp);
585#ifdef CONFIG_PPC32
586 else if (_machine == _MACH_chrp && nvram_naddrs == 1) {
587 nvram_data = ioremap(dp->addrs[0].address + isa_mem_base,
588 dp->addrs[0].size);
589 nvram_mult = 1;
590 ppc_md.nvram_read_val = direct_nvram_read_byte;
591 ppc_md.nvram_write_val = direct_nvram_write_byte;
592 } else if (nvram_naddrs == 1) {
593 nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size);
594 nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE;
595 ppc_md.nvram_read_val = direct_nvram_read_byte;
596 ppc_md.nvram_write_val = direct_nvram_write_byte;
597 } else if (nvram_naddrs == 2) {
598 nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size);
599 nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size);
600 ppc_md.nvram_read_val = indirect_nvram_read_byte;
601 ppc_md.nvram_write_val = indirect_nvram_write_byte;
602 } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
603#ifdef CONFIG_ADB_PMU
604 nvram_naddrs = -1;
605 ppc_md.nvram_read_val = pmu_nvram_read_byte;
606 ppc_md.nvram_write_val = pmu_nvram_write_byte;
607#endif /* CONFIG_ADB_PMU */
608 }
609#endif
610 else {
611 printk(KERN_ERR "Incompatible type of NVRAM\n");
612 return -ENXIO;
613 }
614 lookup_partitions();
615 return err;
616}
454 617
455 part = nvram_find_partition(sig, name); 618int pmac_get_partition(int partition)
456 if (part == NULL) 619{
457 return 0; 620 return nvram_partitions[partition];
458
459 return part->index;
460} 621}
461 622
462u8 __pmac pmac_xpram_read(int xpaddr) 623u8 pmac_xpram_read(int xpaddr)
463{ 624{
464 int offset = pmac_get_partition(pmac_nvram_XPRAM); 625 int offset = pmac_get_partition(pmac_nvram_XPRAM);
465 loff_t index;
466 u8 buf;
467 ssize_t count;
468 626
469 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 627 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
470 return 0xff; 628 return 0xff;
471 index = offset + xpaddr;
472 629
473 count = ppc_md.nvram_read(&buf, 1, &index); 630 return ppc_md.nvram_read_val(xpaddr + offset);
474 if (count != 1)
475 return 0xff;
476 return buf;
477} 631}
478 632
479void __pmac pmac_xpram_write(int xpaddr, u8 data) 633void pmac_xpram_write(int xpaddr, u8 data)
480{ 634{
481 int offset = pmac_get_partition(pmac_nvram_XPRAM); 635 int offset = pmac_get_partition(pmac_nvram_XPRAM);
482 loff_t index;
483 u8 buf;
484 636
485 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100) 637 if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
486 return; 638 return;
487 index = offset + xpaddr;
488 buf = data;
489 639
490 ppc_md.nvram_write(&buf, 1, &index); 640 ppc_md.nvram_write_val(xpaddr + offset, data);
491} 641}
492 642
493EXPORT_SYMBOL(pmac_get_partition); 643EXPORT_SYMBOL(pmac_get_partition);
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
new file mode 100644
index 000000000000..8f818d092e2b
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -0,0 +1,1170 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 *
4 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
5 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/init.h>
18#include <linux/bootmem.h>
19
20#include <asm/sections.h>
21#include <asm/io.h>
22#include <asm/prom.h>
23#include <asm/pci-bridge.h>
24#include <asm/machdep.h>
25#include <asm/pmac_feature.h>
26#include <asm/grackle.h>
27#ifdef CONFIG_PPC64
28#include <asm/iommu.h>
29#include <asm/ppc-pci.h>
30#endif
31
32#undef DEBUG
33
34#ifdef DEBUG
35#define DBG(x...) printk(x)
36#else
37#define DBG(x...)
38#endif
39
40static int add_bridge(struct device_node *dev);
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45#ifdef CONFIG_PPC64
46static struct pci_controller *u3_agp;
47static struct pci_controller *u3_ht;
48#endif /* CONFIG_PPC64 */
49
50extern u8 pci_cache_line_size;
51extern int pcibios_assign_bus_offset;
52
53struct device_node *k2_skiplist[2];
54
55/*
56 * Magic constants for enabling cache coherency in the bandit/PSX bridge.
57 */
58#define BANDIT_DEVID_2 8
59#define BANDIT_REVID 3
60
61#define BANDIT_DEVNUM 11
62#define BANDIT_MAGIC 0x50
63#define BANDIT_COHERENT 0x40
64
65static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
66{
67 for (; node != 0;node = node->sibling) {
68 int * bus_range;
69 unsigned int *class_code;
70 int len;
71
72 /* For PCI<->PCI bridges or CardBus bridges, we go down */
73 class_code = (unsigned int *) get_property(node, "class-code", NULL);
74 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
75 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
76 continue;
77 bus_range = (int *) get_property(node, "bus-range", &len);
78 if (bus_range != NULL && len > 2 * sizeof(int)) {
79 if (bus_range[1] > higher)
80 higher = bus_range[1];
81 }
82 higher = fixup_one_level_bus_range(node->child, higher);
83 }
84 return higher;
85}
86
87/* This routine fixes the "bus-range" property of all bridges in the
88 * system since they tend to have their "last" member wrong on macs
89 *
90 * Note that the bus numbers manipulated here are OF bus numbers, they
91 * are not Linux bus numbers.
92 */
93static void __init fixup_bus_range(struct device_node *bridge)
94{
95 int * bus_range;
96 int len;
97
98 /* Lookup the "bus-range" property for the hose */
99 bus_range = (int *) get_property(bridge, "bus-range", &len);
100 if (bus_range == NULL || len < 2 * sizeof(int)) {
101 printk(KERN_WARNING "Can't get bus-range for %s\n",
102 bridge->full_name);
103 return;
104 }
105 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
106}
107
108/*
109 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
110 *
111 * The "Bandit" version is present in all early PCI PowerMacs,
112 * and up to the first ones using Grackle. Some machines may
113 * have 2 bandit controllers (2 PCI busses).
114 *
115 * "Chaos" is used in some "Bandit"-type machines as a bridge
116 * for the separate display bus. It is accessed the same
117 * way as bandit, but cannot be probed for devices. It therefore
118 * has its own config access functions.
119 *
120 * The "UniNorth" version is present in all Core99 machines
121 * (iBook, G4, new IMacs, and all the recent Apple machines).
122 * It contains 3 controllers in one ASIC.
123 *
124 * The U3 is the bridge used on G5 machines. It contains an
125 * AGP bus which is dealt with the old UniNorth access routines
126 * and a HyperTransport bus which uses its own set of access
127 * functions.
128 */
129
130#define MACRISC_CFA0(devfn, off) \
131 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
132 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
133 | (((unsigned long)(off)) & 0xFCUL))
134
135#define MACRISC_CFA1(bus, devfn, off) \
136 ((((unsigned long)(bus)) << 16) \
137 |(((unsigned long)(devfn)) << 8) \
138 |(((unsigned long)(off)) & 0xFCUL) \
139 |1UL)
140
141static unsigned long macrisc_cfg_access(struct pci_controller* hose,
142 u8 bus, u8 dev_fn, u8 offset)
143{
144 unsigned int caddr;
145
146 if (bus == hose->first_busno) {
147 if (dev_fn < (11 << 3))
148 return 0;
149 caddr = MACRISC_CFA0(dev_fn, offset);
150 } else
151 caddr = MACRISC_CFA1(bus, dev_fn, offset);
152
153 /* Uninorth will return garbage if we don't read back the value ! */
154 do {
155 out_le32(hose->cfg_addr, caddr);
156 } while (in_le32(hose->cfg_addr) != caddr);
157
158 offset &= has_uninorth ? 0x07 : 0x03;
159 return ((unsigned long)hose->cfg_data) + offset;
160}
161
162static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
163 int offset, int len, u32 *val)
164{
165 struct pci_controller *hose;
166 unsigned long addr;
167
168 hose = pci_bus_to_host(bus);
169 if (hose == NULL)
170 return PCIBIOS_DEVICE_NOT_FOUND;
171
172 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
173 if (!addr)
174 return PCIBIOS_DEVICE_NOT_FOUND;
175 /*
176 * Note: the caller has already checked that offset is
177 * suitably aligned and that len is 1, 2 or 4.
178 */
179 switch (len) {
180 case 1:
181 *val = in_8((u8 *)addr);
182 break;
183 case 2:
184 *val = in_le16((u16 *)addr);
185 break;
186 default:
187 *val = in_le32((u32 *)addr);
188 break;
189 }
190 return PCIBIOS_SUCCESSFUL;
191}
192
193static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
194 int offset, int len, u32 val)
195{
196 struct pci_controller *hose;
197 unsigned long addr;
198
199 hose = pci_bus_to_host(bus);
200 if (hose == NULL)
201 return PCIBIOS_DEVICE_NOT_FOUND;
202
203 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
204 if (!addr)
205 return PCIBIOS_DEVICE_NOT_FOUND;
206 /*
207 * Note: the caller has already checked that offset is
208 * suitably aligned and that len is 1, 2 or 4.
209 */
210 switch (len) {
211 case 1:
212 out_8((u8 *)addr, val);
213 (void) in_8((u8 *)addr);
214 break;
215 case 2:
216 out_le16((u16 *)addr, val);
217 (void) in_le16((u16 *)addr);
218 break;
219 default:
220 out_le32((u32 *)addr, val);
221 (void) in_le32((u32 *)addr);
222 break;
223 }
224 return PCIBIOS_SUCCESSFUL;
225}
226
227static struct pci_ops macrisc_pci_ops =
228{
229 macrisc_read_config,
230 macrisc_write_config
231};
232
233#ifdef CONFIG_PPC32
234/*
235 * Verify that a specific (bus, dev_fn) exists on chaos
236 */
237static int
238chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
239{
240 struct device_node *np;
241 u32 *vendor, *device;
242
243 np = pci_busdev_to_OF_node(bus, devfn);
244 if (np == NULL)
245 return PCIBIOS_DEVICE_NOT_FOUND;
246
247 vendor = (u32 *)get_property(np, "vendor-id", NULL);
248 device = (u32 *)get_property(np, "device-id", NULL);
249 if (vendor == NULL || device == NULL)
250 return PCIBIOS_DEVICE_NOT_FOUND;
251
252 if ((*vendor == 0x106b) && (*device == 3) && (offset >= 0x10)
253 && (offset != 0x14) && (offset != 0x18) && (offset <= 0x24))
254 return PCIBIOS_BAD_REGISTER_NUMBER;
255
256 return PCIBIOS_SUCCESSFUL;
257}
258
259static int
260chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
261 int len, u32 *val)
262{
263 int result = chaos_validate_dev(bus, devfn, offset);
264 if (result == PCIBIOS_BAD_REGISTER_NUMBER)
265 *val = ~0U;
266 if (result != PCIBIOS_SUCCESSFUL)
267 return result;
268 return macrisc_read_config(bus, devfn, offset, len, val);
269}
270
271static int
272chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
273 int len, u32 val)
274{
275 int result = chaos_validate_dev(bus, devfn, offset);
276 if (result != PCIBIOS_SUCCESSFUL)
277 return result;
278 return macrisc_write_config(bus, devfn, offset, len, val);
279}
280
281static struct pci_ops chaos_pci_ops =
282{
283 chaos_read_config,
284 chaos_write_config
285};
286
287static void __init setup_chaos(struct pci_controller *hose,
288 struct reg_property *addr)
289{
290 /* assume a `chaos' bridge */
291 hose->ops = &chaos_pci_ops;
292 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
293 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
294}
295#else
296#define setup_chaos(hose, addr)
297#endif /* CONFIG_PPC32 */
298
299#ifdef CONFIG_PPC64
300/*
301 * These versions of U3 HyperTransport config space access ops do not
302 * implement self-view of the HT host yet
303 */
304
305/*
306 * This function deals with some "special cases" devices.
307 *
308 * 0 -> No special case
309 * 1 -> Skip the device but act as if the access was successfull
310 * (return 0xff's on reads, eventually, cache config space
311 * accesses in a later version)
312 * -1 -> Hide the device (unsuccessful acess)
313 */
314static int u3_ht_skip_device(struct pci_controller *hose,
315 struct pci_bus *bus, unsigned int devfn)
316{
317 struct device_node *busdn, *dn;
318 int i;
319
320 /* We only allow config cycles to devices that are in OF device-tree
321 * as we are apparently having some weird things going on with some
322 * revs of K2 on recent G5s
323 */
324 if (bus->self)
325 busdn = pci_device_to_OF_node(bus->self);
326 else
327 busdn = hose->arch_data;
328 for (dn = busdn->child; dn; dn = dn->sibling)
329 if (dn->data && PCI_DN(dn)->devfn == devfn)
330 break;
331 if (dn == NULL)
332 return -1;
333
334 /*
335 * When a device in K2 is powered down, we die on config
336 * cycle accesses. Fix that here.
337 */
338 for (i=0; i<2; i++)
339 if (k2_skiplist[i] == dn)
340 return 1;
341
342 return 0;
343}
344
345#define U3_HT_CFA0(devfn, off) \
346 ((((unsigned long)devfn) << 8) | offset)
347#define U3_HT_CFA1(bus, devfn, off) \
348 (U3_HT_CFA0(devfn, off) \
349 + (((unsigned long)bus) << 16) \
350 + 0x01000000UL)
351
352static unsigned long u3_ht_cfg_access(struct pci_controller* hose,
353 u8 bus, u8 devfn, u8 offset)
354{
355 if (bus == hose->first_busno) {
356 /* For now, we don't self probe U3 HT bridge */
357 if (PCI_SLOT(devfn) == 0)
358 return 0;
359 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
360 } else
361 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
362}
363
364static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
365 int offset, int len, u32 *val)
366{
367 struct pci_controller *hose;
368 unsigned long addr;
369
370 hose = pci_bus_to_host(bus);
371 if (hose == NULL)
372 return PCIBIOS_DEVICE_NOT_FOUND;
373
374 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
375 if (!addr)
376 return PCIBIOS_DEVICE_NOT_FOUND;
377
378 switch (u3_ht_skip_device(hose, bus, devfn)) {
379 case 0:
380 break;
381 case 1:
382 switch (len) {
383 case 1:
384 *val = 0xff; break;
385 case 2:
386 *val = 0xffff; break;
387 default:
388 *val = 0xfffffffful; break;
389 }
390 return PCIBIOS_SUCCESSFUL;
391 default:
392 return PCIBIOS_DEVICE_NOT_FOUND;
393 }
394
395 /*
396 * Note: the caller has already checked that offset is
397 * suitably aligned and that len is 1, 2 or 4.
398 */
399 switch (len) {
400 case 1:
401 *val = in_8((u8 *)addr);
402 break;
403 case 2:
404 *val = in_le16((u16 *)addr);
405 break;
406 default:
407 *val = in_le32((u32 *)addr);
408 break;
409 }
410 return PCIBIOS_SUCCESSFUL;
411}
412
413static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
414 int offset, int len, u32 val)
415{
416 struct pci_controller *hose;
417 unsigned long addr;
418
419 hose = pci_bus_to_host(bus);
420 if (hose == NULL)
421 return PCIBIOS_DEVICE_NOT_FOUND;
422
423 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
424 if (!addr)
425 return PCIBIOS_DEVICE_NOT_FOUND;
426
427 switch (u3_ht_skip_device(hose, bus, devfn)) {
428 case 0:
429 break;
430 case 1:
431 return PCIBIOS_SUCCESSFUL;
432 default:
433 return PCIBIOS_DEVICE_NOT_FOUND;
434 }
435
436 /*
437 * Note: the caller has already checked that offset is
438 * suitably aligned and that len is 1, 2 or 4.
439 */
440 switch (len) {
441 case 1:
442 out_8((u8 *)addr, val);
443 (void) in_8((u8 *)addr);
444 break;
445 case 2:
446 out_le16((u16 *)addr, val);
447 (void) in_le16((u16 *)addr);
448 break;
449 default:
450 out_le32((u32 *)addr, val);
451 (void) in_le32((u32 *)addr);
452 break;
453 }
454 return PCIBIOS_SUCCESSFUL;
455}
456
457static struct pci_ops u3_ht_pci_ops =
458{
459 u3_ht_read_config,
460 u3_ht_write_config
461};
462#endif /* CONFIG_PPC64 */
463
464#ifdef CONFIG_PPC32
465/*
466 * For a bandit bridge, turn on cache coherency if necessary.
467 * N.B. we could clean this up using the hose ops directly.
468 */
469static void __init init_bandit(struct pci_controller *bp)
470{
471 unsigned int vendev, magic;
472 int rev;
473
474 /* read the word at offset 0 in config space for device 11 */
475 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + PCI_VENDOR_ID);
476 udelay(2);
477 vendev = in_le32(bp->cfg_data);
478 if (vendev == (PCI_DEVICE_ID_APPLE_BANDIT << 16) +
479 PCI_VENDOR_ID_APPLE) {
480 /* read the revision id */
481 out_le32(bp->cfg_addr,
482 (1UL << BANDIT_DEVNUM) + PCI_REVISION_ID);
483 udelay(2);
484 rev = in_8(bp->cfg_data);
485 if (rev != BANDIT_REVID)
486 printk(KERN_WARNING
487 "Unknown revision %d for bandit\n", rev);
488 } else if (vendev != (BANDIT_DEVID_2 << 16) + PCI_VENDOR_ID_APPLE) {
489 printk(KERN_WARNING "bandit isn't? (%x)\n", vendev);
490 return;
491 }
492
493 /* read the word at offset 0x50 */
494 out_le32(bp->cfg_addr, (1UL << BANDIT_DEVNUM) + BANDIT_MAGIC);
495 udelay(2);
496 magic = in_le32(bp->cfg_data);
497 if ((magic & BANDIT_COHERENT) != 0)
498 return;
499 magic |= BANDIT_COHERENT;
500 udelay(2);
501 out_le32(bp->cfg_data, magic);
502 printk(KERN_INFO "Cache coherency enabled for bandit/PSX\n");
503}
504
505/*
506 * Tweak the PCI-PCI bridge chip on the blue & white G3s.
507 */
508static void __init init_p2pbridge(void)
509{
510 struct device_node *p2pbridge;
511 struct pci_controller* hose;
512 u8 bus, devfn;
513 u16 val;
514
515 /* XXX it would be better here to identify the specific
516 PCI-PCI bridge chip we have. */
517 if ((p2pbridge = find_devices("pci-bridge")) == 0
518 || p2pbridge->parent == NULL
519 || strcmp(p2pbridge->parent->name, "pci") != 0)
520 return;
521 if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
522 DBG("Can't find PCI infos for PCI<->PCI bridge\n");
523 return;
524 }
525 /* Warning: At this point, we have not yet renumbered all busses.
526 * So we must use OF walking to find out hose
527 */
528 hose = pci_find_hose_for_OF_device(p2pbridge);
529 if (!hose) {
530 DBG("Can't find hose for PCI<->PCI bridge\n");
531 return;
532 }
533 if (early_read_config_word(hose, bus, devfn,
534 PCI_BRIDGE_CONTROL, &val) < 0) {
535 printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n");
536 return;
537 }
538 val &= ~PCI_BRIDGE_CTL_MASTER_ABORT;
539 early_write_config_word(hose, bus, devfn, PCI_BRIDGE_CONTROL, val);
540}
541
542/*
543 * Some Apple desktop machines have a NEC PD720100A USB2 controller
544 * on the motherboard. Open Firmware, on these, will disable the
545 * EHCI part of it so it behaves like a pair of OHCI's. This fixup
546 * code re-enables it ;)
547 */
548static void __init fixup_nec_usb2(void)
549{
550 struct device_node *nec;
551
552 for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) {
553 struct pci_controller *hose;
554 u32 data, *prop;
555 u8 bus, devfn;
556
557 prop = (u32 *)get_property(nec, "vendor-id", NULL);
558 if (prop == NULL)
559 continue;
560 if (0x1033 != *prop)
561 continue;
562 prop = (u32 *)get_property(nec, "device-id", NULL);
563 if (prop == NULL)
564 continue;
565 if (0x0035 != *prop)
566 continue;
567 prop = (u32 *)get_property(nec, "reg", NULL);
568 if (prop == NULL)
569 continue;
570 devfn = (prop[0] >> 8) & 0xff;
571 bus = (prop[0] >> 16) & 0xff;
572 if (PCI_FUNC(devfn) != 0)
573 continue;
574 hose = pci_find_hose_for_OF_device(nec);
575 if (!hose)
576 continue;
577 early_read_config_dword(hose, bus, devfn, 0xe4, &data);
578 if (data & 1UL) {
579 printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n");
580 data &= ~1UL;
581 early_write_config_dword(hose, bus, devfn, 0xe4, data);
582 early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE,
583 nec->intrs[0].line);
584 }
585 }
586}
587
588static void __init setup_bandit(struct pci_controller *hose,
589 struct reg_property *addr)
590{
591 hose->ops = &macrisc_pci_ops;
592 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
593 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
594 init_bandit(hose);
595}
596
597static int __init setup_uninorth(struct pci_controller *hose,
598 struct reg_property *addr)
599{
600 pci_assign_all_buses = 1;
601 has_uninorth = 1;
602 hose->ops = &macrisc_pci_ops;
603 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
604 hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000);
605 /* We "know" that the bridge at f2000000 has the PCI slots. */
606 return addr->address == 0xf2000000;
607}
608#endif
609
610#ifdef CONFIG_PPC64
611static void __init setup_u3_agp(struct pci_controller* hose)
612{
613 /* On G5, we move AGP up to high bus number so we don't need
614 * to reassign bus numbers for HT. If we ever have P2P bridges
615 * on AGP, we'll have to move pci_assign_all_busses to the
616 * pci_controller structure so we enable it for AGP and not for
617 * HT childs.
618 * We hard code the address because of the different size of
619 * the reg address cell, we shall fix that by killing struct
620 * reg_property and using some accessor functions instead
621 */
622 hose->first_busno = 0xf0;
623 hose->last_busno = 0xff;
624 has_uninorth = 1;
625 hose->ops = &macrisc_pci_ops;
626 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
627 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
628
629 u3_agp = hose;
630}
631
632static void __init setup_u3_ht(struct pci_controller* hose)
633{
634 struct device_node *np = (struct device_node *)hose->arch_data;
635 int i, cur;
636
637 hose->ops = &u3_ht_pci_ops;
638
639 /* We hard code the address because of the different size of
640 * the reg address cell, we shall fix that by killing struct
641 * reg_property and using some accessor functions instead
642 */
643 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
644
645 /*
646 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
647 * have been allocated to AGP. So far, this version of the code doesn't assign
648 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
649 * We need to fix that sooner or later by either parsing all child "ranges"
650 * properties or figuring out the U3 address space decoding logic and
651 * then read its configuration register (if any).
652 */
653 hose->io_base_phys = 0xf4000000;
654 hose->pci_io_size = 0x00400000;
655 hose->io_resource.name = np->full_name;
656 hose->io_resource.start = 0;
657 hose->io_resource.end = 0x003fffff;
658 hose->io_resource.flags = IORESOURCE_IO;
659 hose->pci_mem_offset = 0;
660 hose->first_busno = 0;
661 hose->last_busno = 0xef;
662 hose->mem_resources[0].name = np->full_name;
663 hose->mem_resources[0].start = 0x80000000;
664 hose->mem_resources[0].end = 0xefffffff;
665 hose->mem_resources[0].flags = IORESOURCE_MEM;
666
667 u3_ht = hose;
668
669 if (u3_agp == NULL) {
670 DBG("U3 has no AGP, using full resource range\n");
671 return;
672 }
673
674 /* We "remove" the AGP resources from the resources allocated to HT, that
675 * is we create "holes". However, that code does assumptions that so far
676 * happen to be true (cross fingers...), typically that resources in the
677 * AGP node are properly ordered
678 */
679 cur = 0;
680 for (i=0; i<3; i++) {
681 struct resource *res = &u3_agp->mem_resources[i];
682 if (res->flags != IORESOURCE_MEM)
683 continue;
684 /* We don't care about "fine" resources */
685 if (res->start >= 0xf0000000)
686 continue;
687 /* Check if it's just a matter of "shrinking" us in one direction */
688 if (hose->mem_resources[cur].start == res->start) {
689 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
690 cur, hose->mem_resources[cur].start, res->end + 1);
691 hose->mem_resources[cur].start = res->end + 1;
692 continue;
693 }
694 if (hose->mem_resources[cur].end == res->end) {
695 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
696 cur, hose->mem_resources[cur].end, res->start - 1);
697 hose->mem_resources[cur].end = res->start - 1;
698 continue;
699 }
700 /* No, it's not the case, we need a hole */
701 if (cur == 2) {
702 /* not enough resources for a hole, we drop part of the range */
703 printk(KERN_WARNING "Running out of resources for /ht host !\n");
704 hose->mem_resources[cur].end = res->start - 1;
705 continue;
706 }
707 cur++;
708 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
709 cur-1, res->start - 1, cur, res->end + 1);
710 hose->mem_resources[cur].name = np->full_name;
711 hose->mem_resources[cur].flags = IORESOURCE_MEM;
712 hose->mem_resources[cur].start = res->end + 1;
713 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
714 hose->mem_resources[cur-1].end = res->start - 1;
715 }
716}
717
718/* XXX this needs to be converged between ppc32 and ppc64... */
719static struct pci_controller * __init pcibios_alloc_controller(void)
720{
721 struct pci_controller *hose;
722
723 hose = alloc_bootmem(sizeof(struct pci_controller));
724 if (hose)
725 pci_setup_pci_controller(hose);
726 return hose;
727}
728#endif
729
730/*
731 * We assume that if we have a G3 powermac, we have one bridge called
732 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
733 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
734 */
735static int __init add_bridge(struct device_node *dev)
736{
737 int len;
738 struct pci_controller *hose;
739#ifdef CONFIG_PPC32
740 struct reg_property *addr;
741#endif
742 char *disp_name;
743 int *bus_range;
744 int primary = 1;
745
746 DBG("Adding PCI host bridge %s\n", dev->full_name);
747
748#ifdef CONFIG_PPC32
749 /* XXX fix this */
750 addr = (struct reg_property *) get_property(dev, "reg", &len);
751 if (addr == NULL || len < sizeof(*addr)) {
752 printk(KERN_WARNING "Can't use %s: no address\n",
753 dev->full_name);
754 return -ENODEV;
755 }
756#endif
757 bus_range = (int *) get_property(dev, "bus-range", &len);
758 if (bus_range == NULL || len < 2 * sizeof(int)) {
759 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
760 dev->full_name);
761 }
762
763 hose = pcibios_alloc_controller();
764 if (!hose)
765 return -ENOMEM;
766 hose->arch_data = dev;
767 hose->first_busno = bus_range ? bus_range[0] : 0;
768 hose->last_busno = bus_range ? bus_range[1] : 0xff;
769
770 disp_name = NULL;
771#ifdef CONFIG_POWER4
772 if (device_is_compatible(dev, "u3-agp")) {
773 setup_u3_agp(hose);
774 disp_name = "U3-AGP";
775 primary = 0;
776 } else if (device_is_compatible(dev, "u3-ht")) {
777 setup_u3_ht(hose);
778 disp_name = "U3-HT";
779 primary = 1;
780 }
781 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
782 disp_name, hose->first_busno, hose->last_busno);
783#else
784 if (device_is_compatible(dev, "uni-north")) {
785 primary = setup_uninorth(hose, addr);
786 disp_name = "UniNorth";
787 } else if (strcmp(dev->name, "pci") == 0) {
788 /* XXX assume this is a mpc106 (grackle) */
789 setup_grackle(hose);
790 disp_name = "Grackle (MPC106)";
791 } else if (strcmp(dev->name, "bandit") == 0) {
792 setup_bandit(hose, addr);
793 disp_name = "Bandit";
794 } else if (strcmp(dev->name, "chaos") == 0) {
795 setup_chaos(hose, addr);
796 disp_name = "Chaos";
797 primary = 0;
798 }
799 printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n",
800 disp_name, addr->address, hose->first_busno, hose->last_busno);
801#endif
802 DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
803 hose, hose->cfg_addr, hose->cfg_data);
804
805 /* Interpret the "ranges" property */
806 /* This also maps the I/O region and sets isa_io/mem_base */
807 pci_process_bridge_OF_ranges(hose, dev, primary);
808
809 /* Fixup "bus-range" OF property */
810 fixup_bus_range(dev);
811
812 return 0;
813}
814
815static void __init
816pcibios_fixup_OF_interrupts(void)
817{
818 struct pci_dev* dev = NULL;
819
820 /*
821 * Open Firmware often doesn't initialize the
822 * PCI_INTERRUPT_LINE config register properly, so we
823 * should find the device node and apply the interrupt
824 * obtained from the OF device-tree
825 */
826 for_each_pci_dev(dev) {
827 struct device_node *node;
828 node = pci_device_to_OF_node(dev);
829 /* this is the node, see if it has interrupts */
830 if (node && node->n_intrs > 0)
831 dev->irq = node->intrs[0].line;
832 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
833 }
834}
835
836void __init
837pmac_pcibios_fixup(void)
838{
839 /* Fixup interrupts according to OF tree */
840 pcibios_fixup_OF_interrupts();
841}
842
843#ifdef CONFIG_PPC64
844static void __init pmac_fixup_phb_resources(void)
845{
846 struct pci_controller *hose, *tmp;
847
848 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
849 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
850 hose->global_number,
851 hose->io_resource.start, hose->io_resource.end);
852 }
853}
854#endif
855
856void __init pmac_pci_init(void)
857{
858 struct device_node *np, *root;
859 struct device_node *ht = NULL;
860
861 root = of_find_node_by_path("/");
862 if (root == NULL) {
863 printk(KERN_CRIT "pmac_pci_init: can't find root "
864 "of device tree\n");
865 return;
866 }
867 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
868 if (np->name == NULL)
869 continue;
870 if (strcmp(np->name, "bandit") == 0
871 || strcmp(np->name, "chaos") == 0
872 || strcmp(np->name, "pci") == 0) {
873 if (add_bridge(np) == 0)
874 of_node_get(np);
875 }
876 if (strcmp(np->name, "ht") == 0) {
877 of_node_get(np);
878 ht = np;
879 }
880 }
881 of_node_put(root);
882
883#ifdef CONFIG_PPC64
884 /* Probe HT last as it relies on the agp resources to be already
885 * setup
886 */
887 if (ht && add_bridge(ht) != 0)
888 of_node_put(ht);
889
890 /*
891 * We need to call pci_setup_phb_io for the HT bridge first
892 * so it gets the I/O port numbers starting at 0, and we
893 * need to call it for the AGP bridge after that so it gets
894 * small positive I/O port numbers.
895 */
896 if (u3_ht)
897 pci_setup_phb_io(u3_ht, 1);
898 if (u3_agp)
899 pci_setup_phb_io(u3_agp, 0);
900
901 /*
902 * On ppc64, fixup the IO resources on our host bridges as
903 * the common code does it only for children of the host bridges
904 */
905 pmac_fixup_phb_resources();
906
907 /* Setup the linkage between OF nodes and PHBs */
908 pci_devs_phb_init();
909
910 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
911 * assume there is no P2P bridge on the AGP bus, which should be a
912 * safe assumptions hopefully.
913 */
914 if (u3_agp) {
915 struct device_node *np = u3_agp->arch_data;
916 PCI_DN(np)->busno = 0xf0;
917 for (np = np->child; np; np = np->sibling)
918 PCI_DN(np)->busno = 0xf0;
919 }
920
921 /* map in PCI I/O space */
922 phbs_remap_io();
923
924 /* pmac_check_ht_link(); */
925
926 /* Tell pci.c to not use the common resource allocation mechanism */
927 pci_probe_only = 1;
928
929 /* Allow all IO */
930 io_page_mask = -1;
931
932#else /* CONFIG_PPC64 */
933 init_p2pbridge();
934 fixup_nec_usb2();
935
936 /* We are still having some issues with the Xserve G4, enabling
937 * some offset between bus number and domains for now when we
938 * assign all busses should help for now
939 */
940 if (pci_assign_all_buses)
941 pcibios_assign_bus_offset = 0x10;
942#endif
943}
944
945int
946pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
947{
948 struct device_node* node;
949 int updatecfg = 0;
950 int uninorth_child;
951
952 node = pci_device_to_OF_node(dev);
953
954 /* We don't want to enable USB controllers absent from the OF tree
955 * (iBook second controller)
956 */
957 if (dev->vendor == PCI_VENDOR_ID_APPLE
958 && (dev->class == ((PCI_CLASS_SERIAL_USB << 8) | 0x10))
959 && !node) {
960 printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
961 pci_name(dev));
962 return -EINVAL;
963 }
964
965 if (!node)
966 return 0;
967
968 uninorth_child = node->parent &&
969 device_is_compatible(node->parent, "uni-north");
970
971 /* Firewire & GMAC were disabled after PCI probe, the driver is
972 * claiming them, we must re-enable them now.
973 */
974 if (uninorth_child && !strcmp(node->name, "firewire") &&
975 (device_is_compatible(node, "pci106b,18") ||
976 device_is_compatible(node, "pci106b,30") ||
977 device_is_compatible(node, "pci11c1,5811"))) {
978 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, node, 0, 1);
979 pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
980 updatecfg = 1;
981 }
982 if (uninorth_child && !strcmp(node->name, "ethernet") &&
983 device_is_compatible(node, "gmac")) {
984 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
985 updatecfg = 1;
986 }
987
988 if (updatecfg) {
989 u16 cmd;
990
991 /*
992 * Make sure PCI is correctly configured
993 *
994 * We use old pci_bios versions of the function since, by
995 * default, gmac is not powered up, and so will be absent
996 * from the kernel initial PCI lookup.
997 *
998 * Should be replaced by 2.4 new PCI mechanisms and really
999 * register the device.
1000 */
1001 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1002 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1003 | PCI_COMMAND_INVALIDATE;
1004 pci_write_config_word(dev, PCI_COMMAND, cmd);
1005 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 16);
1006 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
1007 L1_CACHE_BYTES >> 2);
1008 }
1009
1010 return 0;
1011}
1012
1013/* We power down some devices after they have been probed. They'll
1014 * be powered back on later on
1015 */
1016void __init pmac_pcibios_after_init(void)
1017{
1018 struct device_node* nd;
1019
1020#ifdef CONFIG_BLK_DEV_IDE
1021 struct pci_dev *dev = NULL;
1022
1023 /* OF fails to initialize IDE controllers on macs
1024 * (and maybe other machines)
1025 *
1026 * Ideally, this should be moved to the IDE layer, but we need
1027 * to check specifically with Andre Hedrick how to do it cleanly
1028 * since the common IDE code seem to care about the fact that the
1029 * BIOS may have disabled a controller.
1030 *
1031 * -- BenH
1032 */
1033 for_each_pci_dev(dev) {
1034 if ((dev->class >> 16) == PCI_BASE_CLASS_STORAGE)
1035 pci_enable_device(dev);
1036 }
1037#endif /* CONFIG_BLK_DEV_IDE */
1038
1039 nd = find_devices("firewire");
1040 while (nd) {
1041 if (nd->parent && (device_is_compatible(nd, "pci106b,18") ||
1042 device_is_compatible(nd, "pci106b,30") ||
1043 device_is_compatible(nd, "pci11c1,5811"))
1044 && device_is_compatible(nd->parent, "uni-north")) {
1045 pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
1046 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
1047 }
1048 nd = nd->next;
1049 }
1050 nd = find_devices("ethernet");
1051 while (nd) {
1052 if (nd->parent && device_is_compatible(nd, "gmac")
1053 && device_is_compatible(nd->parent, "uni-north"))
1054 pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
1055 nd = nd->next;
1056 }
1057}
1058
1059#ifdef CONFIG_PPC32
1060void pmac_pci_fixup_cardbus(struct pci_dev* dev)
1061{
1062 if (_machine != _MACH_Pmac)
1063 return;
1064 /*
1065 * Fix the interrupt routing on the various cardbus bridges
1066 * used on powerbooks
1067 */
1068 if (dev->vendor != PCI_VENDOR_ID_TI)
1069 return;
1070 if (dev->device == PCI_DEVICE_ID_TI_1130 ||
1071 dev->device == PCI_DEVICE_ID_TI_1131) {
1072 u8 val;
1073 /* Enable PCI interrupt */
1074 if (pci_read_config_byte(dev, 0x91, &val) == 0)
1075 pci_write_config_byte(dev, 0x91, val | 0x30);
1076 /* Disable ISA interrupt mode */
1077 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1078 pci_write_config_byte(dev, 0x92, val & ~0x06);
1079 }
1080 if (dev->device == PCI_DEVICE_ID_TI_1210 ||
1081 dev->device == PCI_DEVICE_ID_TI_1211 ||
1082 dev->device == PCI_DEVICE_ID_TI_1410 ||
1083 dev->device == PCI_DEVICE_ID_TI_1510) {
1084 u8 val;
1085 /* 0x8c == TI122X_IRQMUX, 2 says to route the INTA
1086 signal out the MFUNC0 pin */
1087 if (pci_read_config_byte(dev, 0x8c, &val) == 0)
1088 pci_write_config_byte(dev, 0x8c, (val & ~0x0f) | 2);
1089 /* Disable ISA interrupt mode */
1090 if (pci_read_config_byte(dev, 0x92, &val) == 0)
1091 pci_write_config_byte(dev, 0x92, val & ~0x06);
1092 }
1093}
1094
1095DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_ANY_ID, pmac_pci_fixup_cardbus);
1096
1097void pmac_pci_fixup_pciata(struct pci_dev* dev)
1098{
1099 u8 progif = 0;
1100
1101 /*
1102 * On PowerMacs, we try to switch any PCI ATA controller to
1103 * fully native mode
1104 */
1105 if (_machine != _MACH_Pmac)
1106 return;
1107 /* Some controllers don't have the class IDE */
1108 if (dev->vendor == PCI_VENDOR_ID_PROMISE)
1109 switch(dev->device) {
1110 case PCI_DEVICE_ID_PROMISE_20246:
1111 case PCI_DEVICE_ID_PROMISE_20262:
1112 case PCI_DEVICE_ID_PROMISE_20263:
1113 case PCI_DEVICE_ID_PROMISE_20265:
1114 case PCI_DEVICE_ID_PROMISE_20267:
1115 case PCI_DEVICE_ID_PROMISE_20268:
1116 case PCI_DEVICE_ID_PROMISE_20269:
1117 case PCI_DEVICE_ID_PROMISE_20270:
1118 case PCI_DEVICE_ID_PROMISE_20271:
1119 case PCI_DEVICE_ID_PROMISE_20275:
1120 case PCI_DEVICE_ID_PROMISE_20276:
1121 case PCI_DEVICE_ID_PROMISE_20277:
1122 goto good;
1123 }
1124 /* Others, check PCI class */
1125 if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
1126 return;
1127 good:
1128 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1129 if ((progif & 5) != 5) {
1130 printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev));
1131 (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
1132 if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
1133 (progif & 5) != 5)
1134 printk(KERN_ERR "Rewrite of PROGIF failed !\n");
1135 }
1136}
1137DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1138#endif
1139
1140/*
1141 * Disable second function on K2-SATA, it's broken
1142 * and disable IO BARs on first one
1143 */
1144static void fixup_k2_sata(struct pci_dev* dev)
1145{
1146 int i;
1147 u16 cmd;
1148
1149 if (PCI_FUNC(dev->devfn) > 0) {
1150 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1151 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
1152 pci_write_config_word(dev, PCI_COMMAND, cmd);
1153 for (i = 0; i < 6; i++) {
1154 dev->resource[i].start = dev->resource[i].end = 0;
1155 dev->resource[i].flags = 0;
1156 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1157 }
1158 } else {
1159 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1160 cmd &= ~PCI_COMMAND_IO;
1161 pci_write_config_word(dev, PCI_COMMAND, cmd);
1162 for (i = 0; i < 5; i++) {
1163 dev->resource[i].start = dev->resource[i].end = 0;
1164 dev->resource[i].flags = 0;
1165 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
1166 }
1167 }
1168}
1169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
1170
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
new file mode 100644
index 000000000000..0037a8c8c81f
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -0,0 +1,678 @@
1/*
2 * Support for the interrupt controllers found on Power Macintosh,
3 * currently Apple's "Grand Central" interrupt controller in all
4 * it's incarnations. OpenPIC support used on newer machines is
5 * in a separate file
6 *
7 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
8 *
9 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 *
16 */
17
18#include <linux/config.h>
19#include <linux/stddef.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/signal.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/sysdev.h>
26#include <linux/adb.h>
27#include <linux/pmu.h>
28#include <linux/module.h>
29
30#include <asm/sections.h>
31#include <asm/io.h>
32#include <asm/smp.h>
33#include <asm/prom.h>
34#include <asm/pci-bridge.h>
35#include <asm/time.h>
36#include <asm/pmac_feature.h>
37#include <asm/mpic.h>
38
39#include "pmac.h"
40
41/*
42 * XXX this should be in xmon.h, but putting it there means xmon.h
43 * has to include <linux/interrupt.h> (to get irqreturn_t), which
44 * causes all sorts of problems. -- paulus
45 */
46extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
47
48#ifdef CONFIG_PPC32
49struct pmac_irq_hw {
50 unsigned int event;
51 unsigned int enable;
52 unsigned int ack;
53 unsigned int level;
54};
55
56/* Default addresses */
57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
58 (struct pmac_irq_hw *) 0xf3000020,
59 (struct pmac_irq_hw *) 0xf3000010,
60 (struct pmac_irq_hw *) 0xf4000020,
61 (struct pmac_irq_hw *) 0xf4000010,
62};
63
64#define GC_LEVEL_MASK 0x3ff00000
65#define OHARE_LEVEL_MASK 0x1ff00000
66#define HEATHROW_LEVEL_MASK 0x1ff00000
67
68static int max_irqs;
69static int max_real_irqs;
70static u32 level_mask[4];
71
72static DEFINE_SPINLOCK(pmac_pic_lock);
73
74#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76
77/*
78 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask).
80 * -- Cort
81 */
82void
83__set_lost(unsigned long irq_nr, int nokick)
84{
85 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
86 atomic_inc(&ppc_n_lost_interrupts);
87 if (!nokick)
88 set_dec(1);
89 }
90}
91
92static void
93pmac_mask_and_ack_irq(unsigned int irq_nr)
94{
95 unsigned long bit = 1UL << (irq_nr & 0x1f);
96 int i = irq_nr >> 5;
97 unsigned long flags;
98
99 if ((unsigned)irq_nr >= max_irqs)
100 return;
101
102 clear_bit(irq_nr, ppc_cached_irq_mask);
103 if (test_and_clear_bit(irq_nr, ppc_lost_interrupts))
104 atomic_dec(&ppc_n_lost_interrupts);
105 spin_lock_irqsave(&pmac_pic_lock, flags);
106 out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
107 out_le32(&pmac_irq_hw[i]->ack, bit);
108 do {
109 /* make sure ack gets to controller before we enable
110 interrupts */
111 mb();
112 } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
113 != (ppc_cached_irq_mask[i] & bit));
114 spin_unlock_irqrestore(&pmac_pic_lock, flags);
115}
116
117static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
118{
119 unsigned long bit = 1UL << (irq_nr & 0x1f);
120 int i = irq_nr >> 5;
121 unsigned long flags;
122
123 if ((unsigned)irq_nr >= max_irqs)
124 return;
125
126 spin_lock_irqsave(&pmac_pic_lock, flags);
127 /* enable unmasked interrupts */
128 out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
129
130 do {
131 /* make sure mask gets to controller before we
132 return to user */
133 mb();
134 } while((in_le32(&pmac_irq_hw[i]->enable) & bit)
135 != (ppc_cached_irq_mask[i] & bit));
136
137 /*
138 * Unfortunately, setting the bit in the enable register
139 * when the device interrupt is already on *doesn't* set
140 * the bit in the flag register or request another interrupt.
141 */
142 if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
143 __set_lost((ulong)irq_nr, nokicklost);
144 spin_unlock_irqrestore(&pmac_pic_lock, flags);
145}
146
147/* When an irq gets requested for the first client, if it's an
148 * edge interrupt, we clear any previous one on the controller
149 */
150static unsigned int pmac_startup_irq(unsigned int irq_nr)
151{
152 unsigned long bit = 1UL << (irq_nr & 0x1f);
153 int i = irq_nr >> 5;
154
155 if ((irq_desc[irq_nr].status & IRQ_LEVEL) == 0)
156 out_le32(&pmac_irq_hw[i]->ack, bit);
157 set_bit(irq_nr, ppc_cached_irq_mask);
158 pmac_set_irq_mask(irq_nr, 0);
159
160 return 0;
161}
162
163static void pmac_mask_irq(unsigned int irq_nr)
164{
165 clear_bit(irq_nr, ppc_cached_irq_mask);
166 pmac_set_irq_mask(irq_nr, 0);
167 mb();
168}
169
170static void pmac_unmask_irq(unsigned int irq_nr)
171{
172 set_bit(irq_nr, ppc_cached_irq_mask);
173 pmac_set_irq_mask(irq_nr, 0);
174}
175
176static void pmac_end_irq(unsigned int irq_nr)
177{
178 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
179 && irq_desc[irq_nr].action) {
180 set_bit(irq_nr, ppc_cached_irq_mask);
181 pmac_set_irq_mask(irq_nr, 1);
182 }
183}
184
185
186struct hw_interrupt_type pmac_pic = {
187 .typename = " PMAC-PIC ",
188 .startup = pmac_startup_irq,
189 .enable = pmac_unmask_irq,
190 .disable = pmac_mask_irq,
191 .ack = pmac_mask_and_ack_irq,
192 .end = pmac_end_irq,
193};
194
195struct hw_interrupt_type gatwick_pic = {
196 .typename = " GATWICK ",
197 .startup = pmac_startup_irq,
198 .enable = pmac_unmask_irq,
199 .disable = pmac_mask_irq,
200 .ack = pmac_mask_and_ack_irq,
201 .end = pmac_end_irq,
202};
203
204static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
205{
206 int irq, bits;
207
208 for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
209 int i = irq >> 5;
210 bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
211 /* We must read level interrupts from the level register */
212 bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
213 bits &= ppc_cached_irq_mask[i];
214 if (bits == 0)
215 continue;
216 irq += __ilog2(bits);
217 __do_IRQ(irq, regs);
218 return IRQ_HANDLED;
219 }
220 printk("gatwick irq not from gatwick pic\n");
221 return IRQ_NONE;
222}
223
224int
225pmac_get_irq(struct pt_regs *regs)
226{
227 int irq;
228 unsigned long bits = 0;
229
230#ifdef CONFIG_SMP
231 void psurge_smp_message_recv(struct pt_regs *);
232
233 /* IPI's are a hack on the powersurge -- Cort */
234 if ( smp_processor_id() != 0 ) {
235 psurge_smp_message_recv(regs);
236 return -2; /* ignore, already handled */
237 }
238#endif /* CONFIG_SMP */
239 for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
240 int i = irq >> 5;
241 bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
242 /* We must read level interrupts from the level register */
243 bits |= (in_le32(&pmac_irq_hw[i]->level) & level_mask[i]);
244 bits &= ppc_cached_irq_mask[i];
245 if (bits == 0)
246 continue;
247 irq += __ilog2(bits);
248 break;
249 }
250
251 return irq;
252}
253
254/* This routine will fix some missing interrupt values in the device tree
255 * on the gatwick mac-io controller used by some PowerBooks
256 */
257static void __init
258pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base)
259{
260 struct device_node *node;
261 int count;
262
263 memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool));
264 node = gw->child;
265 count = 0;
266 while(node)
267 {
268 /* Fix SCC */
269 if (strcasecmp(node->name, "escc") == 0)
270 if (node->child) {
271 if (node->child->n_intrs < 3) {
272 node->child->intrs = &gatwick_int_pool[count];
273 count += 3;
274 }
275 node->child->n_intrs = 3;
276 node->child->intrs[0].line = 15+irq_base;
277 node->child->intrs[1].line = 4+irq_base;
278 node->child->intrs[2].line = 5+irq_base;
279 printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n",
280 node->child->intrs[0].line,
281 node->child->intrs[1].line,
282 node->child->intrs[2].line);
283 }
284 /* Fix media-bay & left SWIM */
285 if (strcasecmp(node->name, "media-bay") == 0) {
286 struct device_node* ya_node;
287
288 if (node->n_intrs == 0)
289 node->intrs = &gatwick_int_pool[count++];
290 node->n_intrs = 1;
291 node->intrs[0].line = 29+irq_base;
292 printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n",
293 node->intrs[0].line);
294
295 ya_node = node->child;
296 while(ya_node)
297 {
298 if (strcasecmp(ya_node->name, "floppy") == 0) {
299 if (ya_node->n_intrs < 2) {
300 ya_node->intrs = &gatwick_int_pool[count];
301 count += 2;
302 }
303 ya_node->n_intrs = 2;
304 ya_node->intrs[0].line = 19+irq_base;
305 ya_node->intrs[1].line = 1+irq_base;
306 printk(KERN_INFO "irq: fixed floppy on second controller (%d,%d)\n",
307 ya_node->intrs[0].line, ya_node->intrs[1].line);
308 }
309 if (strcasecmp(ya_node->name, "ata4") == 0) {
310 if (ya_node->n_intrs < 2) {
311 ya_node->intrs = &gatwick_int_pool[count];
312 count += 2;
313 }
314 ya_node->n_intrs = 2;
315 ya_node->intrs[0].line = 14+irq_base;
316 ya_node->intrs[1].line = 3+irq_base;
317 printk(KERN_INFO "irq: fixed ide on second controller (%d,%d)\n",
318 ya_node->intrs[0].line, ya_node->intrs[1].line);
319 }
320 ya_node = ya_node->sibling;
321 }
322 }
323 node = node->sibling;
324 }
325 if (count > 10) {
326 printk("WARNING !! Gatwick interrupt pool overflow\n");
327 printk(" GATWICK_IRQ_POOL_SIZE = %d\n", GATWICK_IRQ_POOL_SIZE);
328 printk(" requested = %d\n", count);
329 }
330}
331
332/*
333 * The PowerBook 3400/2400/3500 can have a combo ethernet/modem
334 * card which includes an ohare chip that acts as a second interrupt
335 * controller. If we find this second ohare, set it up and fix the
336 * interrupt value in the device tree for the ethernet chip.
337 */
338static int __init enable_second_ohare(void)
339{
340 unsigned char bus, devfn;
341 unsigned short cmd;
342 unsigned long addr;
343 struct device_node *irqctrler = find_devices("pci106b,7");
344 struct device_node *ether;
345
346 if (irqctrler == NULL || irqctrler->n_addrs <= 0)
347 return -1;
348 addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40);
349 pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20);
350 max_irqs = 64;
351 if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) {
352 struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler);
353 if (!hose)
354 printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
355 else {
356 early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
357 cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
358 cmd &= ~PCI_COMMAND_IO;
359 early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
360 }
361 }
362
363 /* Fix interrupt for the modem/ethernet combo controller. The number
364 in the device tree (27) is bogus (correct for the ethernet-only
365 board but not the combo ethernet/modem board).
366 The real interrupt is 28 on the second controller -> 28+32 = 60.
367 */
368 ether = find_devices("pci1011,14");
369 if (ether && ether->n_intrs > 0) {
370 ether->intrs[0].line = 60;
371 printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n",
372 ether->intrs[0].line);
373 }
374
375 /* Return the interrupt number of the cascade */
376 return irqctrler->intrs[0].line;
377}
378
379#ifdef CONFIG_XMON
380static struct irqaction xmon_action = {
381 .handler = xmon_irq,
382 .flags = 0,
383 .mask = CPU_MASK_NONE,
384 .name = "NMI - XMON"
385};
386#endif
387
388static struct irqaction gatwick_cascade_action = {
389 .handler = gatwick_action,
390 .flags = SA_INTERRUPT,
391 .mask = CPU_MASK_NONE,
392 .name = "cascade",
393};
394#endif /* CONFIG_PPC32 */
395
396static int pmac_u3_cascade(struct pt_regs *regs, void *data)
397{
398 return mpic_get_one_irq((struct mpic *)data, regs);
399}
400
401void __init pmac_pic_init(void)
402{
403 struct device_node *irqctrler = NULL;
404 struct device_node *irqctrler2 = NULL;
405 struct device_node *np;
406#ifdef CONFIG_PPC32
407 int i;
408 unsigned long addr;
409 int irq_cascade = -1;
410#endif
411 struct mpic *mpic1, *mpic2;
412
413 /* We first try to detect Apple's new Core99 chipset, since mac-io
414 * is quite different on those machines and contains an IBM MPIC2.
415 */
416 np = find_type_devices("open-pic");
417 while (np) {
418 if (np->parent && !strcmp(np->parent->name, "u3"))
419 irqctrler2 = np;
420 else
421 irqctrler = np;
422 np = np->next;
423 }
424 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
425 unsigned char senses[128];
426
427 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
428 (unsigned int)irqctrler->addrs[0].address);
429 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0);
430
431 prom_get_irq_senses(senses, 0, 128);
432 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
433 MPIC_PRIMARY | MPIC_WANTS_RESET,
434 0, 0, 128, 252, senses, 128, " OpenPIC ");
435 BUG_ON(mpic1 == NULL);
436 mpic_init(mpic1);
437
438 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
439 irqctrler2->n_addrs > 0) {
440 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
441 (u32)irqctrler2->addrs[0].address,
442 irqctrler2->intrs[0].line);
443
444 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
445 prom_get_irq_senses(senses, 128, 128 + 124);
446
447 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
448 * hypertransport interrupts routed to it
449 */
450 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
451 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
452 0, 128, 124, 0, senses, 124,
453 " U3-MPIC ");
454 BUG_ON(mpic2 == NULL);
455 mpic_init(mpic2);
456 mpic_setup_cascade(irqctrler2->intrs[0].line,
457 pmac_u3_cascade, mpic2);
458 }
459#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
460 {
461 struct device_node* pswitch;
462 int nmi_irq;
463
464 pswitch = find_devices("programmer-switch");
465 if (pswitch && pswitch->n_intrs) {
466 nmi_irq = pswitch->intrs[0].line;
467 mpic_irq_set_priority(nmi_irq, 9);
468 setup_irq(nmi_irq, &xmon_action);
469 }
470 }
471#endif /* CONFIG_XMON */
472 return;
473 }
474 irqctrler = NULL;
475
476#ifdef CONFIG_PPC32
477 /* Get the level/edge settings, assume if it's not
478 * a Grand Central nor an OHare, then it's an Heathrow
479 * (or Paddington).
480 */
481 ppc_md.get_irq = pmac_get_irq;
482 if (find_devices("gc"))
483 level_mask[0] = GC_LEVEL_MASK;
484 else if (find_devices("ohare")) {
485 level_mask[0] = OHARE_LEVEL_MASK;
486 /* We might have a second cascaded ohare */
487 level_mask[1] = OHARE_LEVEL_MASK;
488 } else {
489 level_mask[0] = HEATHROW_LEVEL_MASK;
490 level_mask[1] = 0;
491 /* We might have a second cascaded heathrow */
492 level_mask[2] = HEATHROW_LEVEL_MASK;
493 level_mask[3] = 0;
494 }
495
496 /*
497 * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts,
498 * 1998 G3 Series PowerBooks have 128,
499 * other powermacs have 32.
500 * The combo ethernet/modem card for the Powerstar powerbooks
501 * (2400/3400/3500, ohare based) has a second ohare chip
502 * effectively making a total of 64.
503 */
504 max_irqs = max_real_irqs = 32;
505 irqctrler = find_devices("mac-io");
506 if (irqctrler)
507 {
508 max_real_irqs = 64;
509 if (irqctrler->next)
510 max_irqs = 128;
511 else
512 max_irqs = 64;
513 }
514 for ( i = 0; i < max_real_irqs ; i++ )
515 irq_desc[i].handler = &pmac_pic;
516
517 /* get addresses of first controller */
518 if (irqctrler) {
519 if (irqctrler->n_addrs > 0) {
520 addr = (unsigned long)
521 ioremap(irqctrler->addrs[0].address, 0x40);
522 for (i = 0; i < 2; ++i)
523 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
524 (addr + (2 - i) * 0x10);
525 }
526
527 /* get addresses of second controller */
528 irqctrler = irqctrler->next;
529 if (irqctrler && irqctrler->n_addrs > 0) {
530 addr = (unsigned long)
531 ioremap(irqctrler->addrs[0].address, 0x40);
532 for (i = 2; i < 4; ++i)
533 pmac_irq_hw[i] = (volatile struct pmac_irq_hw*)
534 (addr + (4 - i) * 0x10);
535 irq_cascade = irqctrler->intrs[0].line;
536 if (device_is_compatible(irqctrler, "gatwick"))
537 pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs);
538 }
539 } else {
540 /* older powermacs have a GC (grand central) or ohare at
541 f3000000, with interrupt control registers at f3000020. */
542 addr = (unsigned long) ioremap(0xf3000000, 0x40);
543 pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20);
544 }
545
546 /* PowerBooks 3400 and 3500 can have a second controller in a second
547 ohare chip, on the combo ethernet/modem card */
548 if (machine_is_compatible("AAPL,3400/2400")
549 || machine_is_compatible("AAPL,3500"))
550 irq_cascade = enable_second_ohare();
551
552 /* disable all interrupts in all controllers */
553 for (i = 0; i * 32 < max_irqs; ++i)
554 out_le32(&pmac_irq_hw[i]->enable, 0);
555 /* mark level interrupts */
556 for (i = 0; i < max_irqs; i++)
557 if (level_mask[i >> 5] & (1UL << (i & 0x1f)))
558 irq_desc[i].status = IRQ_LEVEL;
559
560 /* get interrupt line of secondary interrupt controller */
561 if (irq_cascade >= 0) {
562 printk(KERN_INFO "irq: secondary controller on irq %d\n",
563 (int)irq_cascade);
564 for ( i = max_real_irqs ; i < max_irqs ; i++ )
565 irq_desc[i].handler = &gatwick_pic;
566 setup_irq(irq_cascade, &gatwick_cascade_action);
567 }
568 printk("System has %d possible interrupts\n", max_irqs);
569 if (max_irqs != max_real_irqs)
570 printk(KERN_DEBUG "%d interrupts on main controller\n",
571 max_real_irqs);
572
573#ifdef CONFIG_XMON
574 setup_irq(20, &xmon_action);
575#endif /* CONFIG_XMON */
576#endif /* CONFIG_PPC32 */
577}
578
579#ifdef CONFIG_PM
580/*
581 * These procedures are used in implementing sleep on the powerbooks.
582 * sleep_save_intrs() saves the states of all interrupt enables
583 * and disables all interrupts except for the nominated one.
584 * sleep_restore_intrs() restores the states of all interrupt enables.
585 */
586unsigned long sleep_save_mask[2];
587
588/* This used to be passed by the PMU driver but that link got
589 * broken with the new driver model. We use this tweak for now...
590 */
591static int pmacpic_find_viaint(void)
592{
593 int viaint = -1;
594
595#ifdef CONFIG_ADB_PMU
596 struct device_node *np;
597
598 if (pmu_get_model() != PMU_OHARE_BASED)
599 goto not_found;
600 np = of_find_node_by_name(NULL, "via-pmu");
601 if (np == NULL)
602 goto not_found;
603 viaint = np->intrs[0].line;
604#endif /* CONFIG_ADB_PMU */
605
606not_found:
607 return viaint;
608}
609
610static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state)
611{
612 int viaint = pmacpic_find_viaint();
613
614 sleep_save_mask[0] = ppc_cached_irq_mask[0];
615 sleep_save_mask[1] = ppc_cached_irq_mask[1];
616 ppc_cached_irq_mask[0] = 0;
617 ppc_cached_irq_mask[1] = 0;
618 if (viaint > 0)
619 set_bit(viaint, ppc_cached_irq_mask);
620 out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
621 if (max_real_irqs > 32)
622 out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
623 (void)in_le32(&pmac_irq_hw[0]->event);
624 /* make sure mask gets to controller before we return to caller */
625 mb();
626 (void)in_le32(&pmac_irq_hw[0]->enable);
627
628 return 0;
629}
630
631static int pmacpic_resume(struct sys_device *sysdev)
632{
633 int i;
634
635 out_le32(&pmac_irq_hw[0]->enable, 0);
636 if (max_real_irqs > 32)
637 out_le32(&pmac_irq_hw[1]->enable, 0);
638 mb();
639 for (i = 0; i < max_real_irqs; ++i)
640 if (test_bit(i, sleep_save_mask))
641 pmac_unmask_irq(i);
642
643 return 0;
644}
645
646#endif /* CONFIG_PM */
647
648static struct sysdev_class pmacpic_sysclass = {
649 set_kset_name("pmac_pic"),
650};
651
652static struct sys_device device_pmacpic = {
653 .id = 0,
654 .cls = &pmacpic_sysclass,
655};
656
657static struct sysdev_driver driver_pmacpic = {
658#ifdef CONFIG_PM
659 .suspend = &pmacpic_suspend,
660 .resume = &pmacpic_resume,
661#endif /* CONFIG_PM */
662};
663
664static int __init init_pmacpic_sysfs(void)
665{
666#ifdef CONFIG_PPC32
667 if (max_irqs == 0)
668 return -ENODEV;
669#endif
670 printk(KERN_DEBUG "Registering pmac pic with sysfs...\n");
671 sysdev_class_register(&pmacpic_sysclass);
672 sysdev_register(&device_pmacpic);
673 sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic);
674 return 0;
675}
676
677subsys_initcall(init_pmacpic_sysfs);
678
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
new file mode 100644
index 000000000000..664103dfeef9
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pic.h
@@ -0,0 +1,11 @@
1#ifndef __PPC_PLATFORMS_PMAC_PIC_H
2#define __PPC_PLATFORMS_PMAC_PIC_H
3
4#include <linux/irq.h>
5
6extern struct hw_interrupt_type pmac_pic;
7
8void pmac_pic_init(void);
9int pmac_get_irq(struct pt_regs *regs);
10
11#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
new file mode 100644
index 000000000000..2ad25e13423e
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -0,0 +1,51 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6#include <linux/irq.h>
7
8/*
9 * Declaration for the various functions exported by the
10 * pmac_* files. Mostly for use by pmac_setup
11 */
12
13struct rtc_time;
14
15extern long pmac_time_init(void);
16extern unsigned long pmac_get_boot_time(void);
17extern void pmac_get_rtc_time(struct rtc_time *);
18extern int pmac_set_rtc_time(struct rtc_time *);
19extern void pmac_read_rtc_time(void);
20extern void pmac_calibrate_decr(void);
21extern void pmac_pcibios_fixup(void);
22extern void pmac_pci_init(void);
23extern unsigned long pmac_ide_get_base(int index);
24extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
25 unsigned long data_port, unsigned long ctrl_port, int *irq);
26
27extern void pmac_nvram_update(void);
28extern unsigned char pmac_nvram_read_byte(int addr);
29extern void pmac_nvram_write_byte(int addr, unsigned char val);
30extern int pmac_pci_enable_device_hook(struct pci_dev *dev, int initial);
31extern void pmac_pcibios_after_init(void);
32extern int of_show_percpuinfo(struct seq_file *m, int i);
33
34extern void pmac_pci_init(void);
35extern void pmac_setup_pci_dma(void);
36extern void pmac_check_ht_link(void);
37
38extern void pmac_setup_smp(void);
39
40extern unsigned long pmac_ide_get_base(int index);
41extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
42 unsigned long data_port, unsigned long ctrl_port, int *irq);
43
44extern int pmac_nvram_init(void);
45
46extern struct hw_interrupt_type pmac_pic;
47
48void pmac_pic_init(void);
49int pmac_get_irq(struct pt_regs *regs);
50
51#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
new file mode 100644
index 000000000000..6f62af597291
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -0,0 +1,794 @@
1/*
2 * Powermac setup and early boot code plus other random bits.
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#include <linux/config.h>
27#include <linux/init.h>
28#include <linux/errno.h>
29#include <linux/sched.h>
30#include <linux/kernel.h>
31#include <linux/mm.h>
32#include <linux/stddef.h>
33#include <linux/unistd.h>
34#include <linux/ptrace.h>
35#include <linux/slab.h>
36#include <linux/user.h>
37#include <linux/a.out.h>
38#include <linux/tty.h>
39#include <linux/string.h>
40#include <linux/delay.h>
41#include <linux/ioport.h>
42#include <linux/major.h>
43#include <linux/initrd.h>
44#include <linux/vt_kern.h>
45#include <linux/console.h>
46#include <linux/ide.h>
47#include <linux/pci.h>
48#include <linux/adb.h>
49#include <linux/cuda.h>
50#include <linux/pmu.h>
51#include <linux/irq.h>
52#include <linux/seq_file.h>
53#include <linux/root_dev.h>
54#include <linux/bitops.h>
55#include <linux/suspend.h>
56
57#include <asm/reg.h>
58#include <asm/sections.h>
59#include <asm/prom.h>
60#include <asm/system.h>
61#include <asm/pgtable.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/ohare.h>
65#include <asm/mediabay.h>
66#include <asm/machdep.h>
67#include <asm/dma.h>
68#include <asm/cputable.h>
69#include <asm/btext.h>
70#include <asm/pmac_feature.h>
71#include <asm/time.h>
72#include <asm/of_device.h>
73#include <asm/mmu_context.h>
74#include <asm/iommu.h>
75#include <asm/smu.h>
76#include <asm/pmc.h>
77#include <asm/mpic.h>
78
79#include "pmac.h"
80
81#undef SHOW_GATWICK_IRQS
82
83unsigned char drive_info;
84
85int ppc_override_l2cr = 0;
86int ppc_override_l2cr_value;
87int has_l2cache = 0;
88
89int pmac_newworld = 1;
90
91static int current_root_goodness = -1;
92
93extern int pmac_newworld;
94extern struct machdep_calls pmac_md;
95
96#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
97
98#ifdef CONFIG_PPC64
99#include <asm/udbg.h>
100int sccdbg;
101#endif
102
103extern void zs_kgdb_hook(int tty_num);
104
105sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
106EXPORT_SYMBOL(sys_ctrler);
107
108#ifdef CONFIG_PMAC_SMU
109unsigned long smu_cmdbuf_abs;
110EXPORT_SYMBOL(smu_cmdbuf_abs);
111#endif
112
113#ifdef CONFIG_SMP
114extern struct smp_ops_t psurge_smp_ops;
115extern struct smp_ops_t core99_smp_ops;
116#endif /* CONFIG_SMP */
117
118static void pmac_show_cpuinfo(struct seq_file *m)
119{
120 struct device_node *np;
121 char *pp;
122 int plen;
123 int mbmodel;
124 unsigned int mbflags;
125 char* mbname;
126
127 mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
128 PMAC_MB_INFO_MODEL, 0);
129 mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
130 PMAC_MB_INFO_FLAGS, 0);
131 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
132 (long) &mbname) != 0)
133 mbname = "Unknown";
134
135 /* find motherboard type */
136 seq_printf(m, "machine\t\t: ");
137 np = of_find_node_by_path("/");
138 if (np != NULL) {
139 pp = (char *) get_property(np, "model", NULL);
140 if (pp != NULL)
141 seq_printf(m, "%s\n", pp);
142 else
143 seq_printf(m, "PowerMac\n");
144 pp = (char *) get_property(np, "compatible", &plen);
145 if (pp != NULL) {
146 seq_printf(m, "motherboard\t:");
147 while (plen > 0) {
148 int l = strlen(pp) + 1;
149 seq_printf(m, " %s", pp);
150 plen -= l;
151 pp += l;
152 }
153 seq_printf(m, "\n");
154 }
155 of_node_put(np);
156 } else
157 seq_printf(m, "PowerMac\n");
158
159 /* print parsed model */
160 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
161 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
162
163 /* find l2 cache info */
164 np = of_find_node_by_name(NULL, "l2-cache");
165 if (np == NULL)
166 np = of_find_node_by_type(NULL, "cache");
167 if (np != NULL) {
168 unsigned int *ic = (unsigned int *)
169 get_property(np, "i-cache-size", NULL);
170 unsigned int *dc = (unsigned int *)
171 get_property(np, "d-cache-size", NULL);
172 seq_printf(m, "L2 cache\t:");
173 has_l2cache = 1;
174 if (get_property(np, "cache-unified", NULL) != 0 && dc) {
175 seq_printf(m, " %dK unified", *dc / 1024);
176 } else {
177 if (ic)
178 seq_printf(m, " %dK instruction", *ic / 1024);
179 if (dc)
180 seq_printf(m, "%s %dK data",
181 (ic? " +": ""), *dc / 1024);
182 }
183 pp = get_property(np, "ram-type", NULL);
184 if (pp)
185 seq_printf(m, " %s", pp);
186 seq_printf(m, "\n");
187 of_node_put(np);
188 }
189
190 /* Indicate newworld/oldworld */
191 seq_printf(m, "pmac-generation\t: %s\n",
192 pmac_newworld ? "NewWorld" : "OldWorld");
193}
194
195static void pmac_show_percpuinfo(struct seq_file *m, int i)
196{
197#ifdef CONFIG_CPU_FREQ_PMAC
198 extern unsigned int pmac_get_one_cpufreq(int i);
199 unsigned int freq = pmac_get_one_cpufreq(i);
200 if (freq != 0) {
201 seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
202 return;
203 }
204#endif /* CONFIG_CPU_FREQ_PMAC */
205}
206
207#ifndef CONFIG_ADB_CUDA
208int find_via_cuda(void)
209{
210 if (!find_devices("via-cuda"))
211 return 0;
212 printk("WARNING ! Your machine is CUDA-based but your kernel\n");
213 printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
214 return 0;
215}
216#endif
217
218#ifndef CONFIG_ADB_PMU
219int find_via_pmu(void)
220{
221 if (!find_devices("via-pmu"))
222 return 0;
223 printk("WARNING ! Your machine is PMU-based but your kernel\n");
224 printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
225 return 0;
226}
227#endif
228
229#ifndef CONFIG_PMAC_SMU
230int smu_init(void)
231{
232 /* should check and warn if SMU is present */
233 return 0;
234}
235#endif
236
237#ifdef CONFIG_PPC32
238static volatile u32 *sysctrl_regs;
239
240static void __init ohare_init(void)
241{
242 /* this area has the CPU identification register
243 and some registers used by smp boards */
244 sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
245
246 /*
247 * Turn on the L2 cache.
248 * We assume that we have a PSX memory controller iff
249 * we have an ohare I/O controller.
250 */
251 if (find_devices("ohare") != NULL) {
252 if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
253 if (sysctrl_regs[4] & 0x10)
254 sysctrl_regs[4] |= 0x04000020;
255 else
256 sysctrl_regs[4] |= 0x04000000;
257 if(has_l2cache)
258 printk(KERN_INFO "Level 2 cache enabled\n");
259 }
260 }
261}
262
263static void __init l2cr_init(void)
264{
265 /* Checks "l2cr-value" property in the registry */
266 if (cpu_has_feature(CPU_FTR_L2CR)) {
267 struct device_node *np = find_devices("cpus");
268 if (np == 0)
269 np = find_type_devices("cpu");
270 if (np != 0) {
271 unsigned int *l2cr = (unsigned int *)
272 get_property(np, "l2cr-value", NULL);
273 if (l2cr != 0) {
274 ppc_override_l2cr = 1;
275 ppc_override_l2cr_value = *l2cr;
276 _set_L2CR(0);
277 _set_L2CR(ppc_override_l2cr_value);
278 }
279 }
280 }
281
282 if (ppc_override_l2cr)
283 printk(KERN_INFO "L2CR overridden (0x%x), "
284 "backside cache is %s\n",
285 ppc_override_l2cr_value,
286 (ppc_override_l2cr_value & 0x80000000)
287 ? "enabled" : "disabled");
288}
289#endif
290
/*
 * pmac_setup_arch() - board-level boot-time setup for Power Macintosh.
 *
 * Installed as ppc_md.setup_arch.  Derives a provisional
 * loops_per_jiffy from the CPU "clock-frequency" property (refined
 * later by calibrate_delay), detects new-world vs. old-world machines
 * by looking for an "interrupt-controller" node, probes PCI, the
 * system controller (Cuda/PMU/SMU) and NVRAM, and on SMP kernels
 * selects the SMP ops matching the machine generation.
 */
291void __init pmac_setup_arch(void)
292{
293	struct device_node *cpu, *ic;
294	int *fp;
295	unsigned long pvr;
296
297	pvr = PVR_VER(mfspr(SPRN_PVR));
298
299	/* Set loops_per_jiffy to a half-way reasonable value,
300	   for use until calibrate_delay gets called. */
301	loops_per_jiffy = 50000000 / HZ;
302	cpu = of_find_node_by_type(NULL, "cpu");
303	if (cpu != NULL) {
304		fp = (int *) get_property(cpu, "clock-frequency", NULL);
305		if (fp != NULL) {
			/* Divisors below are rough clocks-per-delay-loop
			 * estimates per CPU family, keyed off the PVR. */
306			if (pvr >= 0x30 && pvr < 0x80)
307				/* PPC970 etc. */
308				loops_per_jiffy = *fp / (3 * HZ);
309			else if (pvr == 4 || pvr >= 8)
310				/* 604, G3, G4 etc. */
311				loops_per_jiffy = *fp / HZ;
312			else
313				/* 601, 603, etc. */
314				loops_per_jiffy = *fp / (2 * HZ);
315		}
316		of_node_put(cpu);
317	}
318
319	/* See if newworld or oldworld */
320	for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; )
321		if (get_property(ic, "interrupt-controller", NULL))
322			break;
323	pmac_newworld = (ic != NULL);
324	if (ic)
325		of_node_put(ic);
326
327	/* Lookup PCI hosts */
328	pmac_pci_init();
329
330#ifdef CONFIG_PPC32
331	ohare_init();
332	l2cr_init();
333#endif /* CONFIG_PPC32 */
334
335#ifdef CONFIG_PPC64
336	/* Probe motherboard chipset */
337	/* this is done earlier in setup_arch for 32-bit */
338	pmac_feature_init();
339
340	/* We can NAP */
341	powersave_nap = 1;
342	printk(KERN_INFO "Using native/NAP idle loop\n");
343#endif
344
345#ifdef CONFIG_KGDB
346	zs_kgdb_hook(0);
347#endif
348
	/* Probe for the system controller; a machine is expected to
	 * have exactly one of Cuda, PMU or SMU. */
349	find_via_cuda();
350	find_via_pmu();
351	smu_init();
352
353#ifdef CONFIG_NVRAM
354	pmac_nvram_init();
355#endif
356
357#ifdef CONFIG_PPC32
358#ifdef CONFIG_BLK_DEV_INITRD
359	if (initrd_start)
360		ROOT_DEV = Root_RAM0;
361	else
362#endif
363		ROOT_DEV = DEFAULT_ROOT_DEVICE;
364#endif
365
366#ifdef CONFIG_SMP
367	/* Check for Core99 */
368	if (find_devices("uni-n") || find_devices("u3"))
369		smp_ops = &core99_smp_ops;
370#ifdef CONFIG_PPC32
371	else
372		smp_ops = &psurge_smp_ops;
373#endif
374#endif /* CONFIG_SMP */
375}
376
/* Boot-device bookkeeping filled in from the Open Firmware boot path;
 * consumed below when deriving the default root device.  boot_dev is
 * defined elsewhere (hence the extern). */
377char *bootpath;
378char *bootdevice;
379void *boot_host;
380int boot_target;
381int boot_part;
382extern dev_t boot_dev;
383
384#ifdef CONFIG_SCSI
/*
 * Remember which SCSI host corresponds to the OF boot device so the
 * default root device can be derived later, and extract the SCSI
 * target/partition numbers from bootpath (working around an OF 1.0.5
 * canonicalisation bug, see below).
 */
385void __init note_scsi_host(struct device_node *node, void *host)
386{
387	int l;
388	char *p;
389
390	l = strlen(node->full_name);
	/* Match only when bootdevice is this node or lies under it. */
391	if (bootpath != NULL && bootdevice != NULL
392	    && strncmp(node->full_name, bootdevice, l) == 0
393	    && (bootdevice[l] == '/' || bootdevice[l] == 0)) {
394		boot_host = host;
395		/*
396		 * There's a bug in OF 1.0.5. (Why am I not surprised.)
397		 * If you pass a path like scsi/sd@1:0 to canon, it returns
398		 * something like /bandit@F2000000/gc@10/53c94@10000/sd@0,0
399		 * That is, the scsi target number doesn't get preserved.
400		 * So we pick the target number out of bootpath and use that.
401		 */
402		p = strstr(bootpath, "/sd@");
403		if (p != NULL) {
404			p += 4;
405			boot_target = simple_strtoul(p, NULL, 10);
406			p = strchr(p, ':');
407			if (p != NULL)
408				boot_part = simple_strtoul(p + 1, NULL, 10);
409		}
410	}
411}
412EXPORT_SYMBOL(note_scsi_host);
413#endif
414
415#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
/*
 * Map the OF bootdevice path to an IDE dev_t, or 0 if it cannot be
 * determined.  The real lookup is done by pmac_find_ide_boot()
 * (declared locally below), which receives the path and the length
 * of its parent-directory prefix.
 */
416static dev_t __init find_ide_boot(void)
417{
418	char *p;
419	int n;
420	dev_t __init pmac_find_ide_boot(char *bootdevice, int n);
421
422	if (bootdevice == NULL)
423		return 0;
424	p = strrchr(bootdevice, '/');
425	if (p == NULL)
426		return 0;
	/* n = length of bootdevice up to (excluding) its last '/' */
427	n = p - bootdevice;
428
429	return pmac_find_ide_boot(bootdevice, n);
430}
431#endif /* CONFIG_BLK_DEV_IDE && CONFIG_BLK_DEV_IDE_PMAC */
432
/* Resolve boot_dev from whatever boot-device support is compiled in
 * (currently only pmac IDE); leaves boot_dev untouched otherwise. */
433static void __init find_boot_device(void)
434{
435#if defined(CONFIG_BLK_DEV_IDE) && defined(CONFIG_BLK_DEV_IDE_PMAC)
436	boot_dev = find_ide_boot();
437#endif
438}
439
440/* TODO: Merge the suspend-to-ram with the common code !!!
441 * currently, this is a stub implementation for suspend-to-disk
442 * only
443 */
444
445#ifdef CONFIG_SOFTWARE_SUSPEND
446
/* pm_ops.prepare hook: nothing to do yet, just trace the call. */
447static int pmac_pm_prepare(suspend_state_t state)
448{
449	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
450
451	return 0;
452}
453
/* pm_ops.enter hook: force lazily-held FPU/AltiVec state out of the
 * registers so the low-level suspend path need not save it. */
454static int pmac_pm_enter(suspend_state_t state)
455{
456	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
457
458	/* Giveup the lazy FPU & vec so we don't have to back them
459	 * up from the low level code
460	 */
461	enable_kernel_fp();
462
463#ifdef CONFIG_ALTIVEC
464	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
465		enable_kernel_altivec();
466#endif /* CONFIG_ALTIVEC */
467
468	return 0;
469}
470
/* pm_ops.finish hook: re-install the current task's MMU context after
 * resume. */
471static int pmac_pm_finish(suspend_state_t state)
472{
473	printk(KERN_DEBUG "%s(%d)\n", __FUNCTION__, state);
474
475	/* Restore userland MMU context */
476	set_context(current->active_mm->context, current->active_mm->pgd);
477
478	return 0;
479}
480
/* Suspend-to-disk only: machine is shut down once the image is written. */
481static struct pm_ops pmac_pm_ops = {
482	.pm_disk_mode	= PM_DISK_SHUTDOWN,
483	.prepare	= pmac_pm_prepare,
484	.enter		= pmac_pm_enter,
485	.finish		= pmac_pm_finish,
486};
487
488#endif /* CONFIG_SOFTWARE_SUSPEND */
489
/* Non-zero while the kernel is still booting; note_bootable_part()
 * only honours partitions discovered during that window. */
490static int initializing = 1;
491
/* Late initcall: end the "initializing" window and register the
 * suspend operations (if configured). */
492static int pmac_late_init(void)
493{
494	initializing = 0;
495#ifdef CONFIG_SOFTWARE_SUSPEND
496	pm_set_ops(&pmac_pm_ops);
497#endif /* CONFIG_SOFTWARE_SUSPEND */
498	return 0;
499}
500
501late_initcall(pmac_late_init);
502
503/* can't be __init - can be called whenever a disk is first accessed */
/*
 * Record a candidate root partition during boot-time partition
 * scanning.  Chooses the best-"goodness" partition on the OF boot
 * device, unless the user forced root= on the command line or a
 * better candidate was already selected.
 */
504void note_bootable_part(dev_t dev, int part, int goodness)
505{
506	static int found_boot = 0;
507	char *p;
508
509	if (!initializing)
510		return;
511	if ((goodness <= current_root_goodness) &&
512	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
513		return;
	/* An explicit root= on the command line always wins. */
514	p = strstr(saved_command_line, "root=");
515	if (p != NULL && (p == saved_command_line || p[-1] == ' '))
516		return;
517
518	if (!found_boot) {
519		find_boot_device();
520		found_boot = 1;
521	}
	/* boot_dev == 0 means "no known boot device": accept any disk. */
522	if (!boot_dev || dev == boot_dev) {
523		ROOT_DEV = dev + part;
524		boot_dev = 0;
525		current_root_goodness = goodness;
526	}
527}
528
529#ifdef CONFIG_ADB_CUDA
/* Ask the Cuda controller to reset the machine, then spin polling it
 * until the reset takes effect (does not return). */
530static void cuda_restart(void)
531{
532	struct adb_request req;
533
534	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
535	for (;;)
536		cuda_poll();
537}
538
/* Ask the Cuda controller to power the machine down (does not return). */
539static void cuda_shutdown(void)
540{
541	struct adb_request req;
542
543	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
544	for (;;)
545		cuda_poll();
546}
547
548#else
549#define cuda_restart()
550#define cuda_shutdown()
551#endif
552
/* Stub out controller helpers that are not configured in, so the
 * switch statements in pmac_restart()/pmac_power_off() below compile
 * unchanged in every configuration. */
553#ifndef CONFIG_ADB_PMU
554#define pmu_restart()
555#define pmu_shutdown()
556#endif
557
558#ifndef CONFIG_PMAC_SMU
559#define smu_restart()
560#define smu_shutdown()
561#endif
562
/* Reboot via whichever system controller this machine has; falls
 * through silently if none was detected. */
563static void pmac_restart(char *cmd)
564{
565	switch (sys_ctrler) {
566	case SYS_CTRLER_CUDA:
567		cuda_restart();
568		break;
569	case SYS_CTRLER_PMU:
570		pmu_restart();
571		break;
572	case SYS_CTRLER_SMU:
573		smu_restart();
574		break;
575	default: ;
576	}
577}
578
/* Power down via whichever system controller this machine has. */
579static void pmac_power_off(void)
580{
581	switch (sys_ctrler) {
582	case SYS_CTRLER_CUDA:
583		cuda_shutdown();
584		break;
585	case SYS_CTRLER_PMU:
586		pmu_shutdown();
587		break;
588	case SYS_CTRLER_SMU:
589		smu_shutdown();
590		break;
591	default: ;
592	}
593}
594
/* Halt is implemented as power-off on Power Macintosh. */
595static void
596pmac_halt(void)
597{
598	pmac_power_off();
599}
600
601#ifdef CONFIG_PPC32
/*
 * 32-bit early platform init: set ISA/PCI address-space constants and
 * DMA modes, install the pmac machdep vector, and (when pmac IDE is
 * built in) hook up its hwif helpers.
 */
602void __init pmac_init(void)
603{
604	/* isa_io_base gets set in pmac_pci_init */
605	isa_mem_base = PMAC_ISA_MEM_BASE;
606	pci_dram_offset = PMAC_PCI_DRAM_OFFSET;
607	ISA_DMA_THRESHOLD = ~0L;
608	DMA_MODE_READ = 1;
609	DMA_MODE_WRITE = 2;
610
611	ppc_md = pmac_md;
612
613#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
614#ifdef CONFIG_BLK_DEV_IDE_PMAC
615	ppc_ide_md.ide_init_hwif	= pmac_ide_init_hwif_ports;
616	ppc_ide_md.default_io_base	= pmac_ide_get_base;
617#endif /* CONFIG_BLK_DEV_IDE_PMAC */
618#endif /* defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) */
619
620	if (ppc_md.progress) ppc_md.progress("pmac_init(): exit", 0);
621
622}
623#endif
624
625/*
626 * Early initialization.
627 */
628static void __init pmac_init_early(void)
629{
630#ifdef CONFIG_PPC64
631 /* Initialize hash table, from now on, we can take hash faults
632 * and call ioremap
633 */
634 hpte_init_native();
635
636 /* Init SCC */
637 if (strstr(cmd_line, "sccdbg")) {
638 sccdbg = 1;
639 udbg_init_scc(NULL);
640 }
641
642 /* Setup interrupt mapping options */
643 ppc64_interrupt_controller = IC_OPEN_PIC;
644
645 iommu_init_early_u3();
646#endif
647}
648
/*
 * ppc_md.progress hook: echo boot-progress strings to the SCC debug
 * port (64-bit, when sccdbg is set) or to the btext boot console.
 * The numeric code argument is ignored here.
 */
649static void __init pmac_progress(char *s, unsigned short hex)
650{
651#ifdef CONFIG_PPC64
652	if (sccdbg) {
653		udbg_puts(s);
654		udbg_puts("\n");
655		return;
656	}
657#endif
658#ifdef CONFIG_BOOTX_TEXT
659	if (boot_text_mapped) {
660		btext_drawstring(s);
661		btext_drawchar('\n');
662	}
663#endif /* CONFIG_BOOTX_TEXT */
664}
665
666/*
667 * pmac has no legacy IO, anything calling this function has to
668 * fail or bad things will happen
669 */
670static int pmac_check_legacy_ioport(unsigned int baseport)
671{
672 return -ENODEV;
673}
674
/*
 * Register OF platform devices for on-board peripherals that have no
 * other bus driver: the uni-n/u3 i2c cells, the valkyrie and platinum
 * framebuffers, and the SMU.  Runs at device_initcall time.
 */
675static int __init pmac_declare_of_platform_devices(void)
676{
677	struct device_node *np, *npp;
678
	/* find_devices() nodes are not refcounted, unlike the
	 * of_find_* results below which need of_node_put(). */
679	np = find_devices("uni-n");
680	if (np) {
681		for (np = np->child; np != NULL; np = np->sibling)
682			if (strncmp(np->name, "i2c", 3) == 0) {
683				of_platform_device_create(np, "uni-n-i2c",
684							  NULL);
685				break;
686			}
687	}
688	np = find_devices("valkyrie");
689	if (np)
690		of_platform_device_create(np, "valkyrie", NULL);
691	np = find_devices("platinum");
692	if (np)
693		of_platform_device_create(np, "platinum", NULL);
694
695	npp = of_find_node_by_name(NULL, "u3");
696	if (npp) {
697		for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
698			if (strncmp(np->name, "i2c", 3) == 0) {
699				of_platform_device_create(np, "u3-i2c", NULL);
700				of_node_put(np);
701				break;
702			}
703		}
704		of_node_put(npp);
705	}
706	np = of_find_node_by_type(NULL, "smu");
707	if (np) {
708		of_platform_device_create(np, "smu", NULL);
709		of_node_put(np);
710	}
711
712	return 0;
713}
714
715device_initcall(pmac_declare_of_platform_devices);
716
717/*
718 * Called very early, MMU is off, device-tree isn't unflattened
719 */
720static int __init pmac_probe(int platform)
721{
722#ifdef CONFIG_PPC64
723 if (platform != PLATFORM_POWERMAC)
724 return 0;
725
726 /*
727 * On U3, the DART (iommu) must be allocated now since it
728 * has an impact on htab_initialize (due to the large page it
729 * occupies having to be broken up so the DART itself is not
730 * part of the cacheable linar mapping
731 */
732 alloc_u3_dart_table();
733#endif
734
735#ifdef CONFIG_PMAC_SMU
736 /*
737 * SMU based G5s need some memory below 2Gb, at least the current
738 * driver needs that. We have to allocate it now. We allocate 4k
739 * (1 small page) for now.
740 */
741 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
742#endif /* CONFIG_PMAC_SMU */
743
744 return 1;
745}
746
747#ifdef CONFIG_PPC64
/* Decide how to probe a PCI bus: the AGP bridge has no device-tree
 * node, so its (root) bus must be probed conventionally; everything
 * else comes from the device tree. */
748static int pmac_probe_mode(struct pci_bus *bus)
749{
750	struct device_node *node = bus->sysdata;
751
752	/* We need to use normal PCI probing for the AGP bus,
753	   since the device for the AGP bridge isn't in the tree. */
754	if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
755		return PCI_PROBE_NORMAL;
756
757	return PCI_PROBE_DEVTREE;
758}
759#endif
760
/* Machine description vector for Power Macintosh; copied into ppc_md
 * by pmac_init()/setup code.  Note .get_irq is patched later by the
 * interrupt-controller setup (see the inline comment). */
761struct machdep_calls __initdata pmac_md = {
762#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
763	.cpu_die		= generic_mach_cpu_die,
764#endif
765	.probe			= pmac_probe,
766	.setup_arch		= pmac_setup_arch,
767	.init_early		= pmac_init_early,
768	.show_cpuinfo		= pmac_show_cpuinfo,
769	.show_percpuinfo	= pmac_show_percpuinfo,
770	.init_IRQ		= pmac_pic_init,
771	.get_irq		= mpic_get_irq,	/* changed later */
772	.pcibios_fixup		= pmac_pcibios_fixup,
773	.restart		= pmac_restart,
774	.power_off		= pmac_power_off,
775	.halt			= pmac_halt,
776	.time_init		= pmac_time_init,
777	.get_boot_time		= pmac_get_boot_time,
778	.set_rtc_time		= pmac_set_rtc_time,
779	.get_rtc_time		= pmac_get_rtc_time,
780	.calibrate_decr		= pmac_calibrate_decr,
781	.feature_call		= pmac_do_feature_call,
782	.check_legacy_ioport	= pmac_check_legacy_ioport,
783	.progress		= pmac_progress,
784#ifdef CONFIG_PPC64
785	.pci_probe_mode		= pmac_probe_mode,
786	.idle_loop		= native_idle,
787	.enable_pmcs		= power4_enable_pmcs,
788#endif
789#ifdef CONFIG_PPC32
790	.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
791	.pcibios_after_init	= pmac_pcibios_after_init,
792	.phys_mem_access_prot	= pci_phys_mem_access_prot,
793#endif
794};
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
new file mode 100644
index 000000000000..22b113d19b24
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -0,0 +1,396 @@
1/*
2 * This file contains sleep low-level functions for PowerBook G3.
3 * Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
4 * and Paul Mackerras (paulus@samba.org).
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/config.h>
14#include <asm/processor.h>
15#include <asm/page.h>
16#include <asm/ppc_asm.h>
17#include <asm/cputable.h>
18#include <asm/cache.h>
19#include <asm/thread_info.h>
20#include <asm/asm-offsets.h>
21
22#define MAGIC 0x4c617273 /* 'Lars' */
23
24/*
25 * Structure for storing CPU registers on the stack.
26 */
27#define SL_SP 0
28#define SL_PC 4
29#define SL_MSR 8
30#define SL_SDR1 0xc
31#define SL_SPRG0 0x10 /* 4 sprg's */
32#define SL_DBAT0 0x20
33#define SL_IBAT0 0x28
34#define SL_DBAT1 0x30
35#define SL_IBAT1 0x38
36#define SL_DBAT2 0x40
37#define SL_IBAT2 0x48
38#define SL_DBAT3 0x50
39#define SL_IBAT3 0x58
40#define SL_TB 0x60
41#define SL_R2 0x68
42#define SL_CR 0x6c
43#define SL_R12 0x70 /* r12 to r31 */
44#define SL_SIZE (SL_R12 + 80)
45
46 .section .text
47 .align 5
48
49#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC)
50
51/* This gets called by via-pmu.c late during the sleep process.
52 * The PMU has already been sent the sleep command and will shut us down
53 * soon. We need to save all that is needed and setup the wakeup
54 * vector that will be called by the ROM on wakeup
55 */
56_GLOBAL(low_sleep_handler)
57#ifndef CONFIG_6xx
58 blr
59#else
60 mflr r0
61 stw r0,4(r1)
62 stwu r1,-SL_SIZE(r1)
63 mfcr r0
64 stw r0,SL_CR(r1)
65 stw r2,SL_R2(r1)
66 stmw r12,SL_R12(r1)
67
68 /* Save MSR & SDR1 */
69 mfmsr r4
70 stw r4,SL_MSR(r1)
71 mfsdr1 r4
72 stw r4,SL_SDR1(r1)
73
74 /* Get a stable timebase and save it */
751: mftbu r4
76 stw r4,SL_TB(r1)
77 mftb r5
78 stw r5,SL_TB+4(r1)
79 mftbu r3
80 cmpw r3,r4
81 bne 1b
82
83 /* Save SPRGs */
84 mfsprg r4,0
85 stw r4,SL_SPRG0(r1)
86 mfsprg r4,1
87 stw r4,SL_SPRG0+4(r1)
88 mfsprg r4,2
89 stw r4,SL_SPRG0+8(r1)
90 mfsprg r4,3
91 stw r4,SL_SPRG0+12(r1)
92
93 /* Save BATs */
94 mfdbatu r4,0
95 stw r4,SL_DBAT0(r1)
96 mfdbatl r4,0
97 stw r4,SL_DBAT0+4(r1)
98 mfdbatu r4,1
99 stw r4,SL_DBAT1(r1)
100 mfdbatl r4,1
101 stw r4,SL_DBAT1+4(r1)
102 mfdbatu r4,2
103 stw r4,SL_DBAT2(r1)
104 mfdbatl r4,2
105 stw r4,SL_DBAT2+4(r1)
106 mfdbatu r4,3
107 stw r4,SL_DBAT3(r1)
108 mfdbatl r4,3
109 stw r4,SL_DBAT3+4(r1)
110 mfibatu r4,0
111 stw r4,SL_IBAT0(r1)
112 mfibatl r4,0
113 stw r4,SL_IBAT0+4(r1)
114 mfibatu r4,1
115 stw r4,SL_IBAT1(r1)
116 mfibatl r4,1
117 stw r4,SL_IBAT1+4(r1)
118 mfibatu r4,2
119 stw r4,SL_IBAT2(r1)
120 mfibatl r4,2
121 stw r4,SL_IBAT2+4(r1)
122 mfibatu r4,3
123 stw r4,SL_IBAT3(r1)
124 mfibatl r4,3
125 stw r4,SL_IBAT3+4(r1)
126
127 /* Backup various CPU config stuffs */
128 bl __save_cpu_setup
129
130 /* The ROM can wake us up via 2 different vectors:
131 * - On wallstreet & lombard, we must write a magic
132 * value 'Lars' at address 4 and a pointer to a
133 * memory location containing the PC to resume from
134 * at address 0.
135 * - On Core99, we must store the wakeup vector at
136 * address 0x80 and eventually it's parameters
137 * at address 0x84. I've have some trouble with those
138 * parameters however and I no longer use them.
139 */
140 lis r5,grackle_wake_up@ha
141 addi r5,r5,grackle_wake_up@l
142 tophys(r5,r5)
143 stw r5,SL_PC(r1)
144 lis r4,KERNELBASE@h
145 tophys(r5,r1)
146 addi r5,r5,SL_PC
147 lis r6,MAGIC@ha
148 addi r6,r6,MAGIC@l
149 stw r5,0(r4)
150 stw r6,4(r4)
151 /* Setup stuffs at 0x80-0x84 for Core99 */
152 lis r3,core99_wake_up@ha
153 addi r3,r3,core99_wake_up@l
154 tophys(r3,r3)
155 stw r3,0x80(r4)
156 stw r5,0x84(r4)
157 /* Store a pointer to our backup storage into
158 * a kernel global
159 */
160 lis r3,sleep_storage@ha
161 addi r3,r3,sleep_storage@l
162 stw r5,0(r3)
163
164 .globl low_cpu_die
165low_cpu_die:
166 /* Flush & disable all caches */
167 bl flush_disable_caches
168
169 /* Turn off data relocation. */
170 mfmsr r3 /* Save MSR in r7 */
171 rlwinm r3,r3,0,28,26 /* Turn off DR bit */
172 sync
173 mtmsr r3
174 isync
175
176BEGIN_FTR_SECTION
177 /* Flush any pending L2 data prefetches to work around HW bug */
178 sync
179 lis r3,0xfff0
180 lwz r0,0(r3) /* perform cache-inhibited load to ROM */
181 sync /* (caches are disabled at this point) */
182END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
183
184/*
185 * Set the HID0 and MSR for sleep.
186 */
187 mfspr r2,SPRN_HID0
188 rlwinm r2,r2,0,10,7 /* clear doze, nap */
189 oris r2,r2,HID0_SLEEP@h
190 sync
191 isync
192 mtspr SPRN_HID0,r2
193 sync
194
195/* This loop puts us back to sleep in case we have a spurrious
196 * wakeup so that the host bridge properly stays asleep. The
197 * CPU will be turned off, either after a known time (about 1
198 * second) on wallstreet & lombard, or as soon as the CPU enters
199 * SLEEP mode on core99
200 */
201 mfmsr r2
202 oris r2,r2,MSR_POW@h
2031: sync
204 mtmsr r2
205 isync
206 b 1b
207
208/*
209 * Here is the resume code.
210 */
211
212
213/*
214 * Core99 machines resume here
215 * r4 has the physical address of SL_PC(sp) (unused)
216 */
217_GLOBAL(core99_wake_up)
218 /* Make sure HID0 no longer contains any sleep bit and that data cache
219 * is disabled
220 */
221 mfspr r3,SPRN_HID0
222 rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
223 rlwinm 3,r3,0,18,15 /* clear DCE, ICE */
224 mtspr SPRN_HID0,r3
225 sync
226 isync
227
228 /* sanitize MSR */
229 mfmsr r3
230 ori r3,r3,MSR_EE|MSR_IP
231 xori r3,r3,MSR_EE|MSR_IP
232 sync
233 isync
234 mtmsr r3
235 sync
236 isync
237
238 /* Recover sleep storage */
239 lis r3,sleep_storage@ha
240 addi r3,r3,sleep_storage@l
241 tophys(r3,r3)
242 lwz r1,0(r3)
243
244 /* Pass thru to older resume code ... */
245/*
246 * Here is the resume code for older machines.
247 * r1 has the physical address of SL_PC(sp).
248 */
249
250grackle_wake_up:
251
252 /* Restore the kernel's segment registers before
253 * we do any r1 memory access as we are not sure they
254 * are in a sane state above the first 256Mb region
255 */
256 li r0,16 /* load up segment register values */
257 mtctr r0 /* for context 0 */
258 lis r3,0x2000 /* Ku = 1, VSID = 0 */
259 li r4,0
2603: mtsrin r3,r4
261 addi r3,r3,0x111 /* increment VSID */
262 addis r4,r4,0x1000 /* address of next segment */
263 bdnz 3b
264 sync
265 isync
266
267 subi r1,r1,SL_PC
268
269 /* Restore various CPU config stuffs */
270 bl __restore_cpu_setup
271
272 /* Make sure all FPRs have been initialized */
273 bl reloc_offset
274 bl __init_fpu_registers
275
276 /* Invalidate & enable L1 cache, we don't care about
277 * whatever the ROM may have tried to write to memory
278 */
279 bl __inval_enable_L1
280
281 /* Restore the BATs, and SDR1. Then we can turn on the MMU. */
282 lwz r4,SL_SDR1(r1)
283 mtsdr1 r4
284 lwz r4,SL_SPRG0(r1)
285 mtsprg 0,r4
286 lwz r4,SL_SPRG0+4(r1)
287 mtsprg 1,r4
288 lwz r4,SL_SPRG0+8(r1)
289 mtsprg 2,r4
290 lwz r4,SL_SPRG0+12(r1)
291 mtsprg 3,r4
292
293 lwz r4,SL_DBAT0(r1)
294 mtdbatu 0,r4
295 lwz r4,SL_DBAT0+4(r1)
296 mtdbatl 0,r4
297 lwz r4,SL_DBAT1(r1)
298 mtdbatu 1,r4
299 lwz r4,SL_DBAT1+4(r1)
300 mtdbatl 1,r4
301 lwz r4,SL_DBAT2(r1)
302 mtdbatu 2,r4
303 lwz r4,SL_DBAT2+4(r1)
304 mtdbatl 2,r4
305 lwz r4,SL_DBAT3(r1)
306 mtdbatu 3,r4
307 lwz r4,SL_DBAT3+4(r1)
308 mtdbatl 3,r4
309 lwz r4,SL_IBAT0(r1)
310 mtibatu 0,r4
311 lwz r4,SL_IBAT0+4(r1)
312 mtibatl 0,r4
313 lwz r4,SL_IBAT1(r1)
314 mtibatu 1,r4
315 lwz r4,SL_IBAT1+4(r1)
316 mtibatl 1,r4
317 lwz r4,SL_IBAT2(r1)
318 mtibatu 2,r4
319 lwz r4,SL_IBAT2+4(r1)
320 mtibatl 2,r4
321 lwz r4,SL_IBAT3(r1)
322 mtibatu 3,r4
323 lwz r4,SL_IBAT3+4(r1)
324 mtibatl 3,r4
325
326BEGIN_FTR_SECTION
327 li r4,0
328 mtspr SPRN_DBAT4U,r4
329 mtspr SPRN_DBAT4L,r4
330 mtspr SPRN_DBAT5U,r4
331 mtspr SPRN_DBAT5L,r4
332 mtspr SPRN_DBAT6U,r4
333 mtspr SPRN_DBAT6L,r4
334 mtspr SPRN_DBAT7U,r4
335 mtspr SPRN_DBAT7L,r4
336 mtspr SPRN_IBAT4U,r4
337 mtspr SPRN_IBAT4L,r4
338 mtspr SPRN_IBAT5U,r4
339 mtspr SPRN_IBAT5L,r4
340 mtspr SPRN_IBAT6U,r4
341 mtspr SPRN_IBAT6L,r4
342 mtspr SPRN_IBAT7U,r4
343 mtspr SPRN_IBAT7L,r4
344END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
345
346 /* Flush all TLBs */
347 lis r4,0x1000
3481: addic. r4,r4,-0x1000
349 tlbie r4
350 blt 1b
351 sync
352
353 /* restore the MSR and turn on the MMU */
354 lwz r3,SL_MSR(r1)
355 bl turn_on_mmu
356
357 /* get back the stack pointer */
358 tovirt(r1,r1)
359
360 /* Restore TB */
361 li r3,0
362 mttbl r3
363 lwz r3,SL_TB(r1)
364 lwz r4,SL_TB+4(r1)
365 mttbu r3
366 mttbl r4
367
368 /* Restore the callee-saved registers and return */
369 lwz r0,SL_CR(r1)
370 mtcr r0
371 lwz r2,SL_R2(r1)
372 lmw r12,SL_R12(r1)
373 addi r1,r1,SL_SIZE
374 lwz r0,4(r1)
375 mtlr r0
376 blr
377
/* Re-enable the MMU and resume at the caller's (virtual) return
 * address: r3 holds the MSR image to install, LR the physical return
 * address; rfi performs both switches atomically. */
378turn_on_mmu:
379	mflr	r4
380	tovirt(r4,r4)
381	mtsrr0	r4
382	mtsrr1	r3
383	sync
384	isync
385	rfi
386
387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
388
389 .section .data
390 .balign L1_CACHE_BYTES
391sleep_storage:
392 .long 0
393 .balign L1_CACHE_BYTES, 0
394
395#endif /* CONFIG_6xx */
396 .section .text
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
new file mode 100644
index 000000000000..e1f9443cc872
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -0,0 +1,865 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24#include <linux/config.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/interrupt.h>
30#include <linux/kernel_stat.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/errno.h>
35#include <linux/hardirq.h>
36#include <linux/cpu.h>
37
38#include <asm/ptrace.h>
39#include <asm/atomic.h>
40#include <asm/irq.h>
41#include <asm/page.h>
42#include <asm/pgtable.h>
43#include <asm/sections.h>
44#include <asm/io.h>
45#include <asm/prom.h>
46#include <asm/smp.h>
47#include <asm/machdep.h>
48#include <asm/pmac_feature.h>
49#include <asm/time.h>
50#include <asm/mpic.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#undef DEBUG
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void __secondary_start_pmac_0(void);
64
65#ifdef CONFIG_PPC32
66
67/* Sync flag for HW tb sync */
68static volatile int sec_tb_reset = 0;
69
70/*
71 * Powersurge (old powermac SMP) support.
72 */
73
74/* Addresses for powersurge registers */
75#define HAMMERHEAD_BASE 0xf8000000
76#define HHEAD_CONFIG 0x90
77#define HHEAD_SEC_INTR 0xc0
78
79/* register for interrupting the primary processor on the powersurge */
80/* N.B. this is actually the ethernet ROM! */
81#define PSURGE_PRI_INTR 0xf3019000
82
83/* register for storing the start address for the secondary processor */
84/* N.B. this is the PCI config space address register for the 1st bridge */
85#define PSURGE_START 0xf2800000
86
87/* Daystar/XLR8 4-CPU card */
88#define PSURGE_QUAD_REG_ADDR 0xf8800000
89
90#define PSURGE_QUAD_IRQ_SET 0
91#define PSURGE_QUAD_IRQ_CLR 1
92#define PSURGE_QUAD_IRQ_PRIMARY 2
93#define PSURGE_QUAD_CKSTOP_CTL 3
94#define PSURGE_QUAD_PRIMARY_ARB 4
95#define PSURGE_QUAD_BOARD_ID 6
96#define PSURGE_QUAD_WHICH_CPU 7
97#define PSURGE_QUAD_CKSTOP_RDBK 8
98#define PSURGE_QUAD_RESET_CTL 11
99
100#define PSURGE_QUAD_OUT(r, v) (out_8(quad_base + ((r) << 4) + 4, (v)))
101#define PSURGE_QUAD_IN(r) (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
102#define PSURGE_QUAD_BIS(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
103#define PSURGE_QUAD_BIC(r, v) (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
104
105/* virtual addresses for the above */
106static volatile u8 __iomem *hhead_base;
107static volatile u8 __iomem *quad_base;
108static volatile u32 __iomem *psurge_pri_intr;
109static volatile u8 __iomem *psurge_sec_intr;
110static volatile u32 __iomem *psurge_start;
111
112/* values for psurge_type */
113#define PSURGE_NONE -1
114#define PSURGE_DUAL 0
115#define PSURGE_QUAD_OKEE 1
116#define PSURGE_QUAD_COTTON 2
117#define PSURGE_QUAD_ICEGRASS 3
118
119/* what sort of powersurge board we have */
120static int psurge_type = PSURGE_NONE;
121
122/*
123 * Set and clear IPIs for powersurge.
124 */
125static inline void psurge_set_ipi(int cpu)
126{
127 if (psurge_type == PSURGE_NONE)
128 return;
129 if (cpu == 0)
130 in_be32(psurge_pri_intr);
131 else if (psurge_type == PSURGE_DUAL)
132 out_8(psurge_sec_intr, 0);
133 else
134 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
135}
136
137static inline void psurge_clr_ipi(int cpu)
138{
139 if (cpu > 0) {
140 switch(psurge_type) {
141 case PSURGE_DUAL:
142 out_8(psurge_sec_intr, ~0);
143 case PSURGE_NONE:
144 break;
145 default:
146 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
147 }
148 }
149}
150
151/*
152 * On powersurge (old SMP powermac architecture) we don't have
153 * separate IPIs for separate messages like openpic does. Instead
154 * we have a bitmap for each processor, where a 1 bit means that
155 * the corresponding message is pending for that processor.
156 * Ideally each cpu's entry would be in a different cache line.
157 * -- paulus.
158 */
159static unsigned long psurge_smp_message[NR_CPUS];
160
161void psurge_smp_message_recv(struct pt_regs *regs)
162{
163 int cpu = smp_processor_id();
164 int msg;
165
166 /* clear interrupt */
167 psurge_clr_ipi(cpu);
168
169 if (num_online_cpus() < 2)
170 return;
171
172 /* make sure there is a message there */
173 for (msg = 0; msg < 4; msg++)
174 if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
175 smp_message_recv(msg, regs);
176}
177
178irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
179{
180 psurge_smp_message_recv(regs);
181 return IRQ_HANDLED;
182}
183
184static void smp_psurge_message_pass(int target, int msg)
185{
186 int i;
187
188 if (num_online_cpus() < 2)
189 return;
190
191 for (i = 0; i < NR_CPUS; i++) {
192 if (!cpu_online(i))
193 continue;
194 if (target == MSG_ALL
195 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
196 || target == i) {
197 set_bit(msg, &psurge_smp_message[i]);
198 psurge_set_ipi(i);
199 }
200 }
201}
202
203/*
204 * Determine a quad card presence. We read the board ID register, we
205 * force the data bus to change to something else, and we read it again.
206 * It it's stable, then the register probably exist (ugh !)
207 */
208static int __init psurge_quad_probe(void)
209{
210 int type;
211 unsigned int i;
212
213 type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
214 if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
215 || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
216 return PSURGE_DUAL;
217
218 /* looks OK, try a slightly more rigorous test */
219 /* bogus is not necessarily cacheline-aligned,
220 though I don't suppose that really matters. -- paulus */
221 for (i = 0; i < 100; i++) {
222 volatile u32 bogus[8];
223 bogus[(0+i)%8] = 0x00000000;
224 bogus[(1+i)%8] = 0x55555555;
225 bogus[(2+i)%8] = 0xFFFFFFFF;
226 bogus[(3+i)%8] = 0xAAAAAAAA;
227 bogus[(4+i)%8] = 0x33333333;
228 bogus[(5+i)%8] = 0xCCCCCCCC;
229 bogus[(6+i)%8] = 0xCCCCCCCC;
230 bogus[(7+i)%8] = 0x33333333;
231 wmb();
232 asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
233 mb();
234 if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
235 return PSURGE_DUAL;
236 }
237 return type;
238}
239
240static void __init psurge_quad_init(void)
241{
242 int procbits;
243
244 if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
245 procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
246 if (psurge_type == PSURGE_QUAD_ICEGRASS)
247 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
248 else
249 PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
250 mdelay(33);
251 out_8(psurge_sec_intr, ~0);
252 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
253 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
254 if (psurge_type != PSURGE_QUAD_ICEGRASS)
255 PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
256 PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
257 mdelay(33);
258 PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
259 mdelay(33);
260 PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
261 mdelay(33);
262}
263
/* Detect a powersurge SMP board and return the number of CPUs (1 if
 * the machine is not SMP-capable).  Maps the control registers used
 * later by the IPI and kick-cpu helpers as a side effect. */
264static int __init smp_psurge_probe(void)
265{
266	int i, ncpus;
267
268	/* We don't do SMP on the PPC601 -- paulus */
269	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
270		return 1;
271
272	/*
273	 * The powersurge cpu board can be used in the generation
274	 * of powermacs that have a socket for an upgradeable cpu card,
275	 * including the 7500, 8500, 9500, 9600.
276	 * The device tree doesn't tell you if you have 2 cpus because
277	 * OF doesn't know anything about the 2nd processor.
278	 * Instead we look for magic bits in magic registers,
279	 * in the hammerhead memory controller in the case of the
280	 * dual-cpu powersurge board.  -- paulus.
281	 */
282	if (find_devices("hammerhead") == NULL)
283		return 1;
284
285	hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
286	quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
287	psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
288
289	psurge_type = psurge_quad_probe();
290	if (psurge_type != PSURGE_DUAL) {
291		psurge_quad_init();
292		/* All released cards using this HW design have 4 CPUs */
293		ncpus = 4;
294	} else {
		/* dual board: the quad registers don't exist */
295		iounmap(quad_base);
296		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
297			/* not a dual-cpu card */
298			iounmap(hhead_base);
299			psurge_type = PSURGE_NONE;
300			return 1;
301		}
302		ncpus = 2;
303	}
304
305	psurge_start = ioremap(PSURGE_START, 4);
306	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
307
308	/* this is not actually strictly necessary -- paulus. */
309	for (i = 1; i < ncpus; ++i)
310		smp_hw_index[i] = i;
311
312	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
313
314	return ncpus;
315}
316
/* Start secondary CPU 'nr': flush the low kernel image out to memory,
 * publish its entry point via the PSURGE_START register, then pulse
 * its IPI line to release it. */
317static void __init smp_psurge_kick_cpu(int nr)
318{
319	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
320	unsigned long a;
321
322	/* may need to flush here if secondary bats aren't setup */
323	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
324		asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
325	asm volatile("sync");
326
327	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
328
329	out_be32(psurge_start, start);
330	mb();
331
	/* brief IPI pulse wakes the secondary from its wait loop */
332	psurge_set_ipi(nr);
333	udelay(10);
334	psurge_clr_ipi(nr);
335
336	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
337}
338
339/*
340 * With the dual-cpu powersurge board, the decrementers and timebases
341 * of both cpus are frozen after the secondary cpu is started up,
342 * until we give the secondary cpu another interrupt. This routine
343 * uses this to get the timebases synchronized.
344 * -- paulus.
345 */
346static void __init psurge_dual_sync_tb(int cpu_nr)
347{
348 int t;
349
350 set_dec(tb_ticks_per_jiffy);
351 set_tb(0, 0);
352 last_jiffy_stamp(cpu_nr) = 0;
353
354 if (cpu_nr > 0) {
355 mb();
356 sec_tb_reset = 1;
357 return;
358 }
359
360 /* wait for the secondary to have reset its TB before proceeding */
361 for (t = 10000000; t > 0 && !sec_tb_reset; --t)
362 ;
363
364 /* now interrupt the secondary, starting both TBs */
365 psurge_set_ipi(1);
366
367 smp_tb_synchronized = 1;
368}
369
370static struct irqaction psurge_irqaction = {
371 .handler = psurge_primary_intr,
372 .flags = SA_INTERRUPT,
373 .mask = CPU_MASK_NONE,
374 .name = "primary IPI",
375};
376
377static void __init smp_psurge_setup_cpu(int cpu_nr)
378{
379
380 if (cpu_nr == 0) {
381 /* If we failed to start the second CPU, we should still
382 * send it an IPI to start the timebase & DEC or we might
383 * have them stuck.
384 */
385 if (num_online_cpus() < 2) {
386 if (psurge_type == PSURGE_DUAL)
387 psurge_set_ipi(1);
388 return;
389 }
390 /* reset the entry point so if we get another intr we won't
391 * try to startup again */
392 out_be32(psurge_start, 0x100);
393 if (setup_irq(30, &psurge_irqaction))
394 printk(KERN_ERR "Couldn't get primary IPI interrupt");
395 }
396
397 if (psurge_type == PSURGE_DUAL)
398 psurge_dual_sync_tb(cpu_nr);
399}
400
/* PowerSurge boards synchronize timebases in psurge_dual_sync_tb(), so
 * the generic give/take hooks required by smp_ops_t are intentionally
 * empty stubs here. */
401void __init smp_psurge_take_timebase(void)
402{
403	/* Dummy implementation */
404}
405
406void __init smp_psurge_give_timebase(void)
407{
408	/* Dummy implementation */
409}
410
411/* PowerSurge-style Macs */
/* SMP operations table for PowerSurge machines; consumed by the generic
 * PowerMac SMP code. */
412struct smp_ops_t psurge_smp_ops = {
413	.message_pass	= smp_psurge_message_pass,
414	.probe		= smp_psurge_probe,
415	.kick_cpu	= smp_psurge_kick_cpu,
416	.setup_cpu	= smp_psurge_setup_cpu,
417	.give_timebase	= smp_psurge_give_timebase,
418	.take_timebase	= smp_psurge_take_timebase,
419};
420#endif /* CONFIG_PPC32 - actually powersurge support */
421
422#ifdef CONFIG_PPC64
423/*
424 * G5s enable/disable the timebase via an i2c-connected clock chip.
425 */
/* i2c host node of the clock chip that gates the timebase on G5s */
426static struct device_node *pmac_tb_clock_chip_host;
/* i2c address of the Pulsar clock chip (0xd2 or 0xd4), if present */
427static u8 pmac_tb_pulsar_addr;
/* chip-specific freeze/unfreeze routine selected in smp_core99_setup() */
428static void (*pmac_tb_freeze)(int freeze);
/* protects 'timebase' during the give/take handshake below */
429static DEFINE_SPINLOCK(timebase_lock);
430static unsigned long timebase;
431
/*
 * Freeze (freeze != 0) or unfreeze the timebase via the Cypress CY28508
 * clock chip: read register 0x81, flip the two clock-enable bits, write
 * it back. Panics on i2c failure since the TBs would be left unsynced.
 */
432static void smp_core99_cypress_tb_freeze(int freeze)
433{
434	u8 data;
435	int rc;
436
437	/* Strangely, the device-tree says address is 0xd2, but darwin
438	 * accesses 0xd0 ...
439	 */
440	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
441	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
442			       0xd0 | pmac_low_i2c_read,
443			       0x81, &data, 1);
444	if (rc != 0)
445		goto bail;
446
447	data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
448
449	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
450	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
451			       0xd0 | pmac_low_i2c_write,
452			       0x81, &data, 1);
453
454 bail:
455	if (rc != 0) {
		/* Add KERN_ERR for consistency with the Pulsar variant below;
		 * the original printk had no log level. */
456		printk(KERN_ERR "Cypress Timebase %s rc: %d\n",
457		       freeze ? "freeze" : "unfreeze", rc);
458		panic("Timebase freeze failed !\n");
459	}
460}
461
462
/*
 * Freeze (freeze != 0) or unfreeze the timebase via the Pulsar clock
 * chip at pmac_tb_pulsar_addr: read register 0x2e, rewrite the gating
 * bits, write it back. Panics on i2c failure.
 */
463static void smp_core99_pulsar_tb_freeze(int freeze)
464{
465	u8 data;
466	int rc;
467
468	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
469	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
470			       pmac_tb_pulsar_addr | pmac_low_i2c_read,
471			       0x2e, &data, 1);
472	if (rc != 0)
473		goto bail;
474
475	data = (data & 0x88) | (freeze ? 0x11 : 0x22);
476
477	pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
478	rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
479			       pmac_tb_pulsar_addr | pmac_low_i2c_write,
480			       0x2e, &data, 1);
481 bail:
482	if (rc != 0) {
483		printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
484		       freeze ? "freeze" : "unfreeze", rc);
485		panic("Timebase freeze failed !\n");
486	}
487}
488
489
/*
 * G5 primary-side half of TB sync: freeze the TB via the clock chip,
 * publish its value in 'timebase', wait for the secondary to consume it
 * (it clears 'timebase' in smp_core99_take_timebase), then unfreeze.
 */
490static void smp_core99_give_timebase(void)
491{
492	/* Open i2c bus for synchronous access */
493	if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
494		panic("Can't open i2c for TB sync !\n");
495
496	spin_lock(&timebase_lock);
497	(*pmac_tb_freeze)(1);
498	mb();
499	timebase = get_tb();
500	spin_unlock(&timebase_lock);
501
	/* Spin until the secondary zeroes 'timebase' (no timeout) */
502	while (timebase)
503		barrier();
504
505	spin_lock(&timebase_lock);
506	(*pmac_tb_freeze)(0);
507	spin_unlock(&timebase_lock);
508
509	/* Close i2c bus */
510	pmac_low_i2c_close(pmac_tb_clock_chip_host);
511}
512
513
/*
 * G5 secondary-side half of TB sync: wait for the primary to publish
 * 'timebase', copy it into our TB registers, then clear it to signal
 * completion back to the primary.
 */
514static void __devinit smp_core99_take_timebase(void)
515{
516	while (!timebase)
517		barrier();
518	spin_lock(&timebase_lock);
	/* split the 64-bit value into TBU/TBL halves */
519	set_tb(timebase >> 32, timebase & 0xffffffff);
520	timebase = 0;
521	spin_unlock(&timebase_lock);
522}
523
/*
 * G5 setup: locate the i2c clock chip that gates the timebase and pick
 * the matching freeze routine (Pulsar or Cypress). If no supported chip
 * is found, fall back to the generic software TB synchronization.
 */
524static void __init smp_core99_setup(int ncpus)
525{
526	struct device_node *cc = NULL;	
527	struct device_node *p;
528	u32 *reg;
529	int ok;
530
531	/* HW sync only on these platforms */
532	if (!machine_is_compatible("PowerMac7,2") &&
533	    !machine_is_compatible("PowerMac7,3") &&
534	    !machine_is_compatible("RackMac3,1"))
535		return;
536
537	/* Look for the clock chip */
538	while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
		/* must sit on a uni-n i2c bus */
539		p = of_get_parent(cc);
540		ok = p && device_is_compatible(p, "uni-n-i2c");
541		of_node_put(p);
542		if (!ok)
543			continue;
544
545		reg = (u32 *)get_property(cc, "reg", NULL);
546		if (reg == NULL)
547			continue;
548
		/* identify the chip by its i2c address */
549		switch (*reg) {
550		case 0xd2:
551			if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
552				pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
553				pmac_tb_pulsar_addr = 0xd2;
554				printk(KERN_INFO "Timebase clock is Pulsar chip\n");
555			} else if (device_is_compatible(cc, "cy28508")) {
556				pmac_tb_freeze = smp_core99_cypress_tb_freeze;
557				printk(KERN_INFO "Timebase clock is Cypress chip\n");
558			}
559			break;
560		case 0xd4:
561			pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
562			pmac_tb_pulsar_addr = 0xd4;
563			printk(KERN_INFO "Timebase clock is Pulsar chip\n");
564			break;
565		}
566		if (pmac_tb_freeze != NULL) {
			/* keep a reference to the host bus node */
567			pmac_tb_clock_chip_host = of_get_parent(cc);
568			of_node_put(cc);
569			break;
570		}
571	}
572	if (pmac_tb_freeze == NULL) {
573		smp_ops->give_timebase = smp_generic_give_timebase;
574		smp_ops->take_timebase = smp_generic_take_timebase;
575	}
576}
577
578/* nothing to do here, caches are already set up by service processor */
579static inline void __devinit core99_init_caches(int cpu)
580{
581}
582
583#else /* CONFIG_PPC64 */
584
585/*
586 * SMP G4 powermacs use a GPIO to enable/disable the timebase.
587 */
588
589static unsigned int core99_tb_gpio;	/* Timebase freeze GPIO */
590
/* TB value and jiffy stamp captured by the primary while the TB is
 * frozen, consumed by the secondary in smp_core99_take_timebase() */
591static unsigned int pri_tb_hi, pri_tb_lo;
592static unsigned int pri_tb_stamp;
593
594/* not __init, called in sleep/wakeup code */
/*
 * G4 primary-side TB sync: handshake with the secondary through
 * sec_tb_reset (0 -> 1 by secondary, 1 -> 2 by primary, 2 -> 0 by
 * secondary), freezing the TB via a GPIO while the value is copied.
 */
595void smp_core99_give_timebase(void)
596{
597	unsigned long flags;
598	unsigned int t;
599
600	/* wait for the secondary to be in take_timebase */
601	for (t = 100000; t > 0 && !sec_tb_reset; --t)
602		udelay(10);
603	if (!sec_tb_reset) {
604		printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
605		return;
606	}
607
608	/* freeze the timebase and read it */
609	/* disable interrupts so the timebase is disabled for the
610	   shortest possible time */
611	local_irq_save(flags);
612	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
	/* read back to make sure the write reached the hardware */
613	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
614	mb();
615	pri_tb_hi = get_tbu();
616	pri_tb_lo = get_tbl();
617	pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
618	mb();
619
620	/* tell the secondary we're ready */
621	sec_tb_reset = 2;
622	mb();
623
624	/* wait for the secondary to have taken it */
625	for (t = 100000; t > 0 && sec_tb_reset; --t)
626		udelay(10);
627	if (sec_tb_reset)
628		printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
629	else
630		smp_tb_synchronized = 1;
631
632	/* Now, restart the timebase by leaving the GPIO to an open collector */
633	pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
634	pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
635	local_irq_restore(flags);
636}
637
638/* not __init, called in sleep/wakeup code */
/*
 * G4 secondary-side TB sync: announce readiness (sec_tb_reset = 1),
 * wait for the primary to capture TB (sec_tb_reset == 2), copy the
 * captured values, and acknowledge (sec_tb_reset = 0).
 */
639void smp_core99_take_timebase(void)
640{
641	unsigned long flags;
642
643	/* tell the primary we're here */
644	sec_tb_reset = 1;
645	mb();
646
647	/* wait for the primary to set pri_tb_hi/lo */
	/* NOTE(review): unbounded wait, unlike the primary's timeouts */
648	while (sec_tb_reset < 2)
649		mb();
650
651	/* set our stuff the same as the primary */
652	local_irq_save(flags);
653	set_dec(1);
654	set_tb(pri_tb_hi, pri_tb_lo);
655	last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
656	mb();
657
658	/* tell the primary we're done */
659	sec_tb_reset = 0;
660	mb();
661	local_irq_restore(flags);
662}
663
664/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
/* Fixed declaration-specifier order: the storage class ('static') must
 * come first; 'volatile static' is a deprecated ordering that gcc flags
 * with -Wold-style-declaration. No behavior change. */
665static volatile long int core99_l2_cache;
666static volatile long int core99_l3_cache;
667
/*
 * Copy CPU0's L2CR/L3CR settings to a freshly-started secondary CPU.
 * CPU0 records its values; any other CPU clears then reloads its cache
 * control registers from the recorded values. Skips registers the CPU
 * does not have (CPU_FTR_L2CR / CPU_FTR_L3CR).
 */
668static void __devinit core99_init_caches(int cpu)
669{
670	if (!cpu_has_feature(CPU_FTR_L2CR))
671		return;
672
673	if (cpu == 0) {
674		core99_l2_cache = _get_L2CR();
675		printk("CPU0: L2CR is %lx\n", core99_l2_cache);
676	} else {
677		printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
678		_set_L2CR(0);
679		_set_L2CR(core99_l2_cache);
680		printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
681	}
682
683	if (!cpu_has_feature(CPU_FTR_L3CR))
684		return;
685
686	if (cpu == 0){
687		core99_l3_cache = _get_L3CR();
688		printk("CPU0: L3CR is %lx\n", core99_l3_cache);
689	} else {
690		printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
691		_set_L3CR(0);
692		_set_L3CR(core99_l3_cache);
693		printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
694	}
695}
696
/*
 * G4 setup: determine the timebase-enable GPIO (device-tree override or
 * KL_GPIO_TB_ENABLE default), number the hardware CPU indices, and
 * disable nap mode which would interfere with SMP.
 */
697static void __init smp_core99_setup(int ncpus)
698{
699	struct device_node *cpu;
700	u32 *tbprop = NULL;
701	int i;
702
703	core99_tb_gpio = KL_GPIO_TB_ENABLE;	/* default value */
704	cpu = of_find_node_by_type(NULL, "cpu");
705	if (cpu != NULL) {
706		tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
707		if (tbprop)
708			core99_tb_gpio = *tbprop;
709		of_node_put(cpu);
710	}
711
712	/* XXX should get this from reg properties */
713	for (i = 1; i < ncpus; ++i)
714		smp_hw_index[i] = i;
715	powersave_nap = 0;
716}
717#endif
718
/*
 * Probe for Core99 SMP: count "cpu" nodes in the device tree, and if
 * more than one, run platform setup, request MPIC IPIs, and record
 * CPU0's cache settings. Returns the number of CPUs found.
 */
719static int __init smp_core99_probe(void)
720{
721	struct device_node *cpus;	
722	int ncpus = 0;
723
724	if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
725
726	/* Count CPUs in the device-tree */
727       	for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
728		++ncpus;
729
730	printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
731
732	/* Nothing more to do if less than 2 of them */
733	if (ncpus <= 1)
734		return 1;
735
736	smp_core99_setup(ncpus);
737	mpic_request_ipis();
738	core99_init_caches(0);
739
740	return ncpus;
741}
742
/*
 * Start secondary CPU 'nr' (0..3): temporarily patch the system reset
 * vector at KERNELBASE+0x100 with a branch to the secondary entry code,
 * pulse the CPU's reset line, then restore the original vector.
 */
743static void __devinit smp_core99_kick_cpu(int nr)
744{
745	unsigned int save_vector;
746	unsigned long new_vector;
747	unsigned long flags;
748	volatile unsigned int *vector
749		 = ((volatile unsigned int *)(KERNELBASE+0x100));
750
751	if (nr < 0 || nr > 3)
752		return;
753	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
754
	/* local_irq_save() already disables interrupts; the redundant
	 * local_irq_disable() that followed has been removed. */
755	local_irq_save(flags);
757
758	/* Save reset vector */
759	save_vector = *vector;
760
761	/* Setup fake reset vector that does	
762	 *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
763	 */
764	new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
765	*vector = 0x48000002 + new_vector - KERNELBASE;
766
767	/* flush data cache and inval instruction cache */
768	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
769
770	/* Put some life in our friend */
771	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
772
773	/* FIXME: We wait a bit for the CPU to take the exception, I should
774	 * instead wait for the entry code to set something for me. Well,
775	 * ideally, all that crap will be done in prom.c and the CPU left
776	 * in a RAM-based wait loop like CHRP.
777	 */
778	mdelay(1);
779
780	/* Restore our exception vector */
781	*vector = save_vector;
782	flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
783
784	local_irq_restore(flags);
785	if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
786}
787
/*
 * Per-CPU setup for Core99: copy cache settings to secondaries, program
 * the MPIC for this CPU, and on the boot CPU take CPU1 off the bus on
 * G5s if it never came online.
 */
788static void __devinit smp_core99_setup_cpu(int cpu_nr)
789{
790	/* Setup L2/L3 */
791	if (cpu_nr != 0)
792		core99_init_caches(cpu_nr);
793
794	/* Setup openpic */
795	mpic_setup_this_cpu();
796
797	if (cpu_nr == 0) {
798#ifdef CONFIG_POWER4
799		extern void g5_phy_disable_cpu1(void);
800
801		/* If we didn't start the second CPU, we must take
802		 * it off the bus
803		 */
804		if (machine_is_compatible("MacRISC4") &&
805		    num_online_cpus() < 2)		
806			g5_phy_disable_cpu1();
807#endif /* CONFIG_POWER4 */
808		if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
809	}
810}
811
812
813/* Core99 Macs (dual G4s and G5s) */
/* SMP operations table for Core99 machines; message passing goes
 * through the MPIC rather than the PowerSurge IPI scheme. */
814struct smp_ops_t core99_smp_ops = {
815	.message_pass	= smp_mpic_message_pass,
816	.probe		= smp_core99_probe,
817	.kick_cpu	= smp_core99_kick_cpu,
818	.setup_cpu	= smp_core99_setup_cpu,
819	.give_timebase	= smp_core99_give_timebase,
820	.take_timebase	= smp_core99_take_timebase,
821};
822
823#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
824
/*
 * Hotplug: take the current CPU offline. Marks it not-online, masks
 * interrupts at the MPIC, and pushes the decrementer far into the
 * future (twice, in case one write races a pending decrementer).
 * Always succeeds (returns 0).
 */
825int __cpu_disable(void)
826{
827	cpu_clear(smp_processor_id(), cpu_online_map);
828
829	/* XXX reset cpu affinity here */
830	mpic_cpu_set_priority(0xf);
831	asm volatile("mtdec %0" : : "r" (0x7fffffff));
832	mb();
833	udelay(20);
834	asm volatile("mtdec %0" : : "r" (0x7fffffff));
835	return 0;
836}
837
838extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
/* per-CPU flag polled by __cpu_die() on the surviving CPU */
839static int cpu_dead[NR_CPUS];
840
/* Final code run on a dying CPU: flag ourselves dead, then enter the
 * low-level assembly death loop (does not return). */
841void cpu_die(void)
842{
843	local_irq_disable();
844	cpu_dead[smp_processor_id()] = 1;
845	mb();
846	low_cpu_die();
847}
848
/*
 * Hotplug: wait (up to ~1s) for 'cpu' to report itself dead via
 * cpu_dead[], then clear its callin/dead flags so it can be restarted.
 */
849void __cpu_die(unsigned int cpu)
850{
851	int timeout;
852
853	timeout = 1000;
854	while (!cpu_dead[cpu]) {
855		if (--timeout == 0) {
856			printk("CPU %u refused to die!\n", cpu);
857			break;
858		}
859		msleep(1);
860	}
861	cpu_callin_map[cpu] = 0;
862	cpu_dead[cpu] = 0;
863}
864
865#endif
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
new file mode 100644
index 000000000000..5947b21a8588
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -0,0 +1,360 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/cuda.h>
23#include <linux/pmu.h>
24#include <linux/interrupt.h>
25#include <linux/hardirq.h>
26#include <linux/rtc.h>
27
28#include <asm/sections.h>
29#include <asm/prom.h>
30#include <asm/system.h>
31#include <asm/io.h>
32#include <asm/pgtable.h>
33#include <asm/machdep.h>
34#include <asm/time.h>
35#include <asm/nvram.h>
36#include <asm/smu.h>
37
38#undef DEBUG
39
40#ifdef DEBUG
41#define DBG(x...) printk(x)
42#else
43#define DBG(x...)
44#endif
45
46/* Apparently the RTC stores seconds since 1 Jan 1904 */
47#define RTC_OFFSET 2082844800
48
49/*
50 * Calibrate the decrementer frequency with the VIA timer 1.
51 */
52#define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */
53
54/* VIA registers */
55#define RS 0x200 /* skip between registers */
56#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
57#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
58#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
59#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
60#define ACR (11*RS) /* Auxiliary control register */
61#define IFR (13*RS) /* Interrupt flag register */
62
63/* Bits in ACR */
64#define T1MODE 0xc0 /* Timer 1 mode */
65#define T1MODE_CONT 0x40 /* continuous interrupts */
66
67/* Bits in IFR and IER */
68#define T1_INT 0x40 /* Timer 1 interrupt */
69
/*
 * Read the GMT offset (in seconds, signed 24-bit sign-extended) from
 * XPRAM. Returns 0 when CONFIG_NVRAM is off. The DST flag is read and
 * logged but not otherwise used here.
 */
70long __init pmac_time_init(void)
71{
72	s32 delta = 0;
73#ifdef CONFIG_NVRAM
74	int dst;
75	
76	delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
77	delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
78	delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
	/* sign-extend the 24-bit value to 32 bits */
79	if (delta & 0x00800000UL)
80		delta |= 0xFF000000UL;
81	dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
82	printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
83		dst ? "on" : "off");
84#endif
85	return delta;
86}
87
/* Convert seconds-since-epoch to struct rtc_time conventions
 * (tm_year offset from 1900, tm_mon zero-based). */
88static void to_rtc_time(unsigned long now, struct rtc_time *tm)
89{
90	to_tm(now, tm);
91	tm->tm_year -= 1900;
92	tm->tm_mon -= 1;
93}
94
/* Inverse of to_rtc_time(): struct rtc_time back to seconds since epoch. */
95static unsigned long from_rtc_time(struct rtc_time *tm)
96{
97	return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
98		      tm->tm_hour, tm->tm_min, tm->tm_sec);
99}
100
101#ifdef CONFIG_ADB_CUDA
/* Read the RTC through the CUDA controller. Returns Unix seconds,
 * or 0 if the request could not be issued. Note a short/long reply is
 * only logged; the bytes are used regardless. */
102static unsigned long cuda_get_time(void)
103{
104	struct adb_request req;
105	unsigned long now;
106
107	if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
108		return 0;
109	while (!req.complete)
110		cuda_poll();
111	if (req.reply_len != 7)
112		printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
113		       req.reply_len);
	/* reply[3..6] hold a big-endian 32-bit Mac time value */
114	now = (req.reply[3] << 24) + (req.reply[4] << 16)
115		+ (req.reply[5] << 8) + req.reply[6];
	/* RTC counts from 1904; convert to the Unix epoch */
116	return now - RTC_OFFSET;
117}
118
119#define cuda_get_rtc_time(tm)	to_rtc_time(cuda_get_time(), (tm))
120
/* Write tm to the RTC through CUDA. Returns 0 on success, -ENXIO if
 * the request could not be issued. */
121static int cuda_set_rtc_time(struct rtc_time *tm)
122{
123	unsigned int nowtime;
124	struct adb_request req;
125
126	nowtime = from_rtc_time(tm) + RTC_OFFSET;
127	if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
128			 nowtime >> 24, nowtime >> 16, nowtime >> 8,
129			 nowtime) < 0)
130		return -ENXIO;
131	while (!req.complete)
132		cuda_poll();
133	if ((req.reply_len != 3) && (req.reply_len != 7))
134		printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
135		       req.reply_len);
136	return 0;
137}
138
139#else
/* Stubs when CUDA support is compiled out */
140#define cuda_get_time()		0
141#define cuda_get_rtc_time(tm)
142#define cuda_set_rtc_time(tm)	0
143#endif
144
145#ifdef CONFIG_ADB_PMU
/* Read the RTC through the PMU. Returns Unix seconds, or 0 if the
 * request could not be issued. */
146static unsigned long pmu_get_time(void)
147{
148	struct adb_request req;
149	unsigned long now;
150
151	if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
152		return 0;
153	pmu_wait_complete(&req);
154	if (req.reply_len != 4)
155		printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
156		       req.reply_len);
	/* reply[0..3] hold a big-endian 32-bit Mac time value */
157	now = (req.reply[0] << 24) + (req.reply[1] << 16)
158		+ (req.reply[2] << 8) + req.reply[3];
159	return now - RTC_OFFSET;
160}
161
162#define pmu_get_rtc_time(tm)	to_rtc_time(pmu_get_time(), (tm))
163
/* Write tm to the RTC through the PMU. Returns 0 on success, -ENXIO if
 * the request could not be issued. */
164static int pmu_set_rtc_time(struct rtc_time *tm)
165{
166	unsigned int nowtime;
167	struct adb_request req;
168
169	nowtime = from_rtc_time(tm) + RTC_OFFSET;
170	if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
171			nowtime >> 16, nowtime >> 8, nowtime) < 0)
172		return -ENXIO;
173	pmu_wait_complete(&req);
174	if (req.reply_len != 0)
175		printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
176		       req.reply_len);
177	return 0;
178}
179
180#else
/* Stubs when PMU support is compiled out */
181#define pmu_get_time()		0
182#define pmu_get_rtc_time(tm)
183#define pmu_set_rtc_time(tm)	0
184#endif
185
186#ifdef CONFIG_PMAC_SMU
/* Read the RTC through the SMU (G5s). Returns Unix seconds, or 0 on
 * failure. The SMU driver already deals in struct rtc_time. */
187static unsigned long smu_get_time(void)
188{
189	struct rtc_time tm;
190
191	if (smu_get_rtc_time(&tm, 1))
192		return 0;
193	return from_rtc_time(&tm);
194}
195
196#else
/* Stubs when SMU support is compiled out */
197#define smu_get_time()			0
198#define smu_get_rtc_time(tm, spin)
199#define smu_set_rtc_time(tm, spin)	0
200#endif
201
/* Boot-time RTC read, dispatched on the detected system controller
 * (CUDA, PMU or SMU). Returns 0 for unknown controllers. */
202unsigned long pmac_get_boot_time(void)
203{
204	/* Get the time from the RTC, used only at boot time */
205	switch (sys_ctrler) {
206	case SYS_CTRLER_CUDA:
207		return cuda_get_time();
208	case SYS_CTRLER_PMU:
209		return pmu_get_time();
210	case SYS_CTRLER_SMU:
211		return smu_get_time();
212	default:
213		return 0;
214	}
215}
216
/* Fill *tm from the RTC, dispatched on the system controller.
 * Leaves *tm untouched for unknown controllers. */
217void pmac_get_rtc_time(struct rtc_time *tm)
218{
219	/* Get the time from the RTC, used only at boot time */
220	switch (sys_ctrler) {
221	case SYS_CTRLER_CUDA:
222		cuda_get_rtc_time(tm);
223		break;
224	case SYS_CTRLER_PMU:
225		pmu_get_rtc_time(tm);
226		break;
227	case SYS_CTRLER_SMU:
228		smu_get_rtc_time(tm, 1);
229		break;
230	default:
231		;
232	}
233}
234
/* Write *tm to the RTC, dispatched on the system controller.
 * Returns 0 on success, -ENODEV for unknown controllers. */
235int pmac_set_rtc_time(struct rtc_time *tm)
236{
237	switch (sys_ctrler) {
238	case SYS_CTRLER_CUDA:
239		return cuda_set_rtc_time(tm);
240	case SYS_CTRLER_PMU:
241		return pmu_set_rtc_time(tm);
242	case SYS_CTRLER_SMU:
243		return smu_set_rtc_time(tm, 1);
244	default:
245		return -ENODEV;
246	}
247}
248
249#ifdef CONFIG_PPC32
250/*
251 * Calibrate the decrementer register using VIA timer 1.
252 * This is used both on powermacs and CHRP machines.
253 */
/* Returns 1 on success (ppc_tb_freq set), 0 if no usable VIA found.
 * NOTE(review): the two polling loops on IFR have no timeout; a dead
 * VIA would hang boot here — presumed acceptable since the node was
 * found in the device tree. */
254int __init via_calibrate_decr(void)
255{
256	struct device_node *vias;
257	volatile unsigned char __iomem *via;
258	int count = VIA_TIMER_FREQ_6 / 100;
259	unsigned int dstart, dend;
260
	/* the VIA may sit behind cuda, pmu or be standalone */
261	vias = find_devices("via-cuda");
262	if (vias == 0)
263		vias = find_devices("via-pmu");
264	if (vias == 0)
265		vias = find_devices("via");
266	if (vias == 0 || vias->n_addrs == 0)
267		return 0;
268	via = ioremap(vias->addrs[0].address, vias->addrs[0].size);
269
270	/* set timer 1 for continuous interrupts */
271	out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
272	/* set the counter to a small value */
273	out_8(&via[T1CH], 2);
274	/* set the latch to `count' */
275	out_8(&via[T1LL], count);
276	out_8(&via[T1LH], count >> 8);
277	/* wait until it hits 0 */
278	while ((in_8(&via[IFR]) & T1_INT) == 0)
279		;
280	dstart = get_dec();
281	/* clear the interrupt & wait until it hits 0 again */
282	in_8(&via[T1CL]);
283	while ((in_8(&via[IFR]) & T1_INT) == 0)
284		;
285	dend = get_dec();
286
	/* one VIA period elapsed; VIA_TIMER_FREQ_6 is 6x the timer freq */
287	ppc_tb_freq = (dstart - dend) * 100 / 6;
288
289	iounmap(via);
290	
291	return 1;
292}
293#endif
294
295#ifdef CONFIG_PM
296/*
297 * Reset the time after a sleep.
298 */
/* PMU sleep notifier: on suspend, record the offset between system time
 * and the RTC; on wake, re-derive system time from RTC + offset (the RTC
 * keeps ticking across sleep, xtime does not). */
299static int
300time_sleep_notify(struct pmu_sleep_notifier *self, int when)
301{
	/* static: must survive between the SLEEP_NOW and WAKE calls */
302	static unsigned long time_diff;
303	unsigned long flags;
304	unsigned long seq;
305	struct timespec tv;
306	
307	switch (when) {
308	case PBOOK_SLEEP_NOW:
		/* seqlock retry loop to get a consistent xtime snapshot */
309		do {
310			seq = read_seqbegin_irqsave(&xtime_lock, flags);
311			time_diff = xtime.tv_sec - pmac_get_boot_time();
312		} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
313		break;
314	case PBOOK_WAKE:
315		tv.tv_sec = pmac_get_boot_time() + time_diff;
316		tv.tv_nsec = 0;
317		do_settimeofday(&tv);
318		break;
319	}
320	return PBOOK_SLEEP_OK;
321}
322
323static struct pmu_sleep_notifier time_sleep_notifier = {
324	time_sleep_notify, SLEEP_LEVEL_MISC,
325};
326#endif /* CONFIG_PM */
327
328/*
329 * Query the OF and get the decr frequency.
330 */
/* Determine the decrementer frequency: generic device-tree calibration
 * first, then (32-bit only) override with VIA-based calibration on
 * machine families whose OF values are known to be unreliable. */
331void __init pmac_calibrate_decr(void)
332{
333#ifdef CONFIG_PM
334	/* XXX why here? */
335	pmu_register_sleep_notifier(&time_sleep_notifier);
336#endif /* CONFIG_PM */
337
338	generic_calibrate_decr();
339
340#ifdef CONFIG_PPC32
341	/* We assume MacRISC2 machines have correct device-tree
342	 * calibration. That's better since the VIA itself seems
343	 * to be slightly off. --BenH
344	 */
345	if (!machine_is_compatible("MacRISC2") &&
346	    !machine_is_compatible("MacRISC3") &&
347	    !machine_is_compatible("MacRISC4"))
348		if (via_calibrate_decr())
349			return;
350
351	/* Special case: QuickSilver G4s seem to have a badly calibrated
352	 * timebase-frequency in OF, VIA is much better on these. We should
353	 * probably implement calibration based on the KL timer on these
354	 * machines anyway... -BenH
355	 */
356	if (machine_is_compatible("PowerMac3,5"))
357		if (via_calibrate_decr())
358			return;
359#endif
360}
diff --git a/arch/powerpc/platforms/prep/Kconfig b/arch/powerpc/platforms/prep/Kconfig
new file mode 100644
index 000000000000..673ac47a1626
--- /dev/null
+++ b/arch/powerpc/platforms/prep/Kconfig
@@ -0,0 +1,22 @@
1
2config PREP_RESIDUAL
3 bool "Support for PReP Residual Data"
4 depends on PPC_PREP
5 help
6 Some PReP systems have residual data passed to the kernel by the
7 firmware. This allows detection of memory size, devices present and
8 other useful pieces of information. Sometimes this information is
9 not present or incorrect, in which case it could lead to the machine
10 behaving incorrectly. If this happens, either disable PREP_RESIDUAL
11 or pass the 'noresidual' option to the kernel.
12
13 If you are running a PReP system, say Y here, otherwise say N.
14
15config PROC_PREPRESIDUAL
16 bool "Support for reading of PReP Residual Data in /proc"
17 depends on PREP_RESIDUAL && PROC_FS
18 help
19 Enabling this option will create a /proc/residual file which allows
20 you to get at the residual data on PReP systems. You will need a tool
21 (lsresidual) to parse it. If you aren't on a PReP system, you don't
22 want this.
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
new file mode 100644
index 000000000000..2d57f588151d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -0,0 +1,42 @@
1
2config PPC_SPLPAR
3 depends on PPC_PSERIES
4 bool "Support for shared-processor logical partitions"
5 default n
6 help
7 Enabling this option will make the kernel run more efficiently
8 on logically-partitioned pSeries systems which use shared
9 processors, that is, which share physical processors between
10 two or more partitions.
11
12config HMT
13 bool "Hardware multithreading"
14 depends on SMP && PPC_PSERIES && BROKEN
15 help
16 This option enables hardware multithreading on RS64 cpus.
17 pSeries systems p620 and p660 have such a cpu type.
18
19config EEH
20 bool "PCI Extended Error Handling (EEH)" if EMBEDDED
21 depends on PPC_PSERIES
22 default y if !EMBEDDED
23
24config RTAS_PROC
25 bool "Proc interface to RTAS"
26 depends on PPC_RTAS
27 default y
28
29config RTAS_FLASH
30 tristate "Firmware flash interface"
31 depends on PPC64 && RTAS_PROC
32
33config SCANLOG
34 tristate "Scanlog dump interface"
35 depends on RTAS_PROC && PPC_PSERIES
36
37config LPARCFG
38 tristate "LPAR Configuration Data"
39 depends on PPC_PSERIES || PPC_ISERIES
40 help
41 Provide system capacity information via human readable
42 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
new file mode 100644
index 000000000000..5ef494e3a70f
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -0,0 +1,5 @@
1obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
2 setup.o iommu.o rtas-fw.o ras.o
3obj-$(CONFIG_SMP) += smp.o
4obj-$(CONFIG_IBMVIO) += vio.o
5obj-$(CONFIG_XICS) += xics.o
diff --git a/arch/ppc64/kernel/pSeries_hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 176e8da76466..176e8da76466 100644
--- a/arch/ppc64/kernel/pSeries_hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index d17f0108a032..9e90d41131d8 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -46,7 +46,8 @@
46#include <asm/pSeries_reconfig.h> 46#include <asm/pSeries_reconfig.h>
47#include <asm/systemcfg.h> 47#include <asm/systemcfg.h>
48#include <asm/firmware.h> 48#include <asm/firmware.h>
49#include "pci.h" 49#include <asm/tce.h>
50#include <asm/ppc-pci.h>
50 51
51#define DBG(fmt...) 52#define DBG(fmt...)
52 53
@@ -59,6 +60,9 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
59 union tce_entry t; 60 union tce_entry t;
60 union tce_entry *tp; 61 union tce_entry *tp;
61 62
63 index <<= TCE_PAGE_FACTOR;
64 npages <<= TCE_PAGE_FACTOR;
65
62 t.te_word = 0; 66 t.te_word = 0;
63 t.te_rdwr = 1; // Read allowed 67 t.te_rdwr = 1; // Read allowed
64 68
@@ -69,11 +73,11 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
69 73
70 while (npages--) { 74 while (npages--) {
71 /* can't move this out since we might cross LMB boundary */ 75 /* can't move this out since we might cross LMB boundary */
72 t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 76 t.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
73 77
74 tp->te_word = t.te_word; 78 tp->te_word = t.te_word;
75 79
76 uaddr += PAGE_SIZE; 80 uaddr += TCE_PAGE_SIZE;
77 tp++; 81 tp++;
78 } 82 }
79} 83}
@@ -84,6 +88,9 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
84 union tce_entry t; 88 union tce_entry t;
85 union tce_entry *tp; 89 union tce_entry *tp;
86 90
91 npages <<= TCE_PAGE_FACTOR;
92 index <<= TCE_PAGE_FACTOR;
93
87 t.te_word = 0; 94 t.te_word = 0;
88 tp = ((union tce_entry *)tbl->it_base) + index; 95 tp = ((union tce_entry *)tbl->it_base) + index;
89 96
@@ -103,7 +110,7 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
103 union tce_entry tce; 110 union tce_entry tce;
104 111
105 tce.te_word = 0; 112 tce.te_word = 0;
106 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 113 tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
107 tce.te_rdwr = 1; 114 tce.te_rdwr = 1;
108 if (direction != DMA_TO_DEVICE) 115 if (direction != DMA_TO_DEVICE)
109 tce.te_pciwr = 1; 116 tce.te_pciwr = 1;
@@ -136,6 +143,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
136 union tce_entry tce, *tcep; 143 union tce_entry tce, *tcep;
137 long l, limit; 144 long l, limit;
138 145
146 tcenum <<= TCE_PAGE_FACTOR;
147 npages <<= TCE_PAGE_FACTOR;
148
139 if (npages == 1) 149 if (npages == 1)
140 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 150 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
141 direction); 151 direction);
@@ -155,7 +165,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
155 } 165 }
156 166
157 tce.te_word = 0; 167 tce.te_word = 0;
158 tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT; 168 tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
159 tce.te_rdwr = 1; 169 tce.te_rdwr = 1;
160 if (direction != DMA_TO_DEVICE) 170 if (direction != DMA_TO_DEVICE)
161 tce.te_pciwr = 1; 171 tce.te_pciwr = 1;
@@ -166,7 +176,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
166 * Set up the page with TCE data, looping through and setting 176 * Set up the page with TCE data, looping through and setting
167 * the values. 177 * the values.
168 */ 178 */
169 limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry)); 179 limit = min_t(long, npages, 4096/sizeof(union tce_entry));
170 180
171 for (l = 0; l < limit; l++) { 181 for (l = 0; l < limit; l++) {
172 tcep[l] = tce; 182 tcep[l] = tce;
@@ -196,6 +206,9 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
196 u64 rc; 206 u64 rc;
197 union tce_entry tce; 207 union tce_entry tce;
198 208
209 tcenum <<= TCE_PAGE_FACTOR;
210 npages <<= TCE_PAGE_FACTOR;
211
199 tce.te_word = 0; 212 tce.te_word = 0;
200 213
201 while (npages--) { 214 while (npages--) {
@@ -221,6 +234,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
221 u64 rc; 234 u64 rc;
222 union tce_entry tce; 235 union tce_entry tce;
223 236
237 tcenum <<= TCE_PAGE_FACTOR;
238 npages <<= TCE_PAGE_FACTOR;
239
224 tce.te_word = 0; 240 tce.te_word = 0;
225 241
226 rc = plpar_tce_stuff((u64)tbl->it_index, 242 rc = plpar_tce_stuff((u64)tbl->it_index,
diff --git a/arch/ppc64/kernel/pSeries_lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a6de83f2078f..268d8362dde7 100644
--- a/arch/ppc64/kernel/pSeries_lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -486,8 +486,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
486 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie 486 * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
487 * lock. 487 * lock.
488 */ 488 */
489void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number, 489void pSeries_lpar_flush_hash_range(unsigned long number, int local)
490 int local)
491{ 490{
492 int i; 491 int i;
493 unsigned long flags = 0; 492 unsigned long flags = 0;
@@ -498,7 +497,7 @@ void pSeries_lpar_flush_hash_range(unsigned long context, unsigned long number,
498 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); 497 spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
499 498
500 for (i = 0; i < number; i++) 499 for (i = 0; i < number; i++)
501 flush_hash_page(context, batch->addr[i], batch->pte[i], local); 500 flush_hash_page(batch->vaddr[i], batch->pte[i], local);
502 501
503 if (lock_tlbie) 502 if (lock_tlbie)
504 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); 503 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/ppc64/kernel/pSeries_nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 18abfb1f4e24..18abfb1f4e24 100644
--- a/arch/ppc64/kernel/pSeries_nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/powerpc/platforms/pseries/pci.c
index 928f8febdb3b..c198656a3bb5 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,8 +29,7 @@
29 29
30#include <asm/pci-bridge.h> 30#include <asm/pci-bridge.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32 32#include <asm/ppc-pci.h>
33#include "pci.h"
34 33
35static int __devinitdata s7a_workaround = -1; 34static int __devinitdata s7a_workaround = -1;
36 35
diff --git a/arch/ppc64/kernel/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 41b97dc9cc0a..6562ff4b0a82 100644
--- a/arch/ppc64/kernel/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -1,17 +1,16 @@
1/* 1/*
2 * ras.c
3 * Copyright (C) 2001 Dave Engebretsen IBM Corporation 2 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 5 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or 6 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version. 7 * (at your option) any later version.
9 * 8 *
10 * This program is distributed in the hope that it will be useful, 9 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 12 * GNU General Public License for more details.
14 * 13 *
15 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -19,7 +18,7 @@
19 18
20/* Change Activity: 19/* Change Activity:
21 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support. 20 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
22 * End Change Activity 21 * End Change Activity
23 */ 22 */
24 23
25#include <linux/errno.h> 24#include <linux/errno.h>
@@ -323,7 +322,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
323 nonfatal = 1; 322 nonfatal = 1;
324 } 323 }
325 324
326 log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal); 325 log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
327 326
328 return nonfatal; 327 return nonfatal;
329} 328}
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 58c61219d08e..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.c b/arch/powerpc/platforms/pseries/rtas-fw.c
new file mode 100644
index 000000000000..15d81d758ca0
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rtas-fw.c
@@ -0,0 +1,138 @@
1/*
2 *
3 * Procedures for firmware flash updates on pSeries systems.
4 *
5 * Peter Bergner, IBM March 2001.
6 * Copyright (C) 2001 IBM.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <stdarg.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/spinlock.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/prom.h>
22#include <asm/rtas.h>
23#include <asm/semaphore.h>
24#include <asm/machdep.h>
25#include <asm/page.h>
26#include <asm/param.h>
27#include <asm/system.h>
28#include <asm/abs_addr.h>
29#include <asm/udbg.h>
30#include <asm/delay.h>
31#include <asm/uaccess.h>
32#include <asm/systemcfg.h>
33
34#include "rtas-fw.h"
35
36struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
37
38#define FLASH_BLOCK_LIST_VERSION (1UL)
39
40static void rtas_flash_firmware(void)
41{
42 unsigned long image_size;
43 struct flash_block_list *f, *next, *flist;
44 unsigned long rtas_block_list;
45 int i, status, update_token;
46
47 update_token = rtas_token("ibm,update-flash-64-and-reboot");
48 if (update_token == RTAS_UNKNOWN_SERVICE) {
49 printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot is not available -- not a service partition?\n");
50 printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
51 return;
52 }
53
54 /* NOTE: the "first" block list is a global var with no data
55 * blocks in the kernel data segment. We do this because
56 * we want to ensure this block_list addr is under 4GB.
57 */
58 rtas_firmware_flash_list.num_blocks = 0;
59 flist = (struct flash_block_list *)&rtas_firmware_flash_list;
60 rtas_block_list = virt_to_abs(flist);
61 if (rtas_block_list >= 4UL*1024*1024*1024) {
62 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
63 return;
64 }
65
66 printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
67 /* Update the block_list in place. */
68 image_size = 0;
69 for (f = flist; f; f = next) {
70 /* Translate data addrs to absolute */
71 for (i = 0; i < f->num_blocks; i++) {
72 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
73 image_size += f->blocks[i].length;
74 }
75 next = f->next;
76 /* Don't translate NULL pointer for last entry */
77 if (f->next)
78 f->next = (struct flash_block_list *)virt_to_abs(f->next);
79 else
80 f->next = NULL;
81 /* make num_blocks into the version/length field */
82 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
83 }
84
85 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
86 printk(KERN_ALERT "FLASH: performing flash and reboot\n");
87 rtas_progress("Flashing \n", 0x0);
88 rtas_progress("Please Wait... ", 0x0);
89 printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
90 status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
91 switch (status) { /* should only get "bad" status */
92 case 0:
93 printk(KERN_ALERT "FLASH: success\n");
94 break;
95 case -1:
96 printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
97 break;
98 case -3:
99 printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
100 break;
101 case -4:
102 printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
103 break;
104 default:
105 printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
106 break;
107 }
108}
109
110void rtas_flash_bypass_warning(void)
111{
112 printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
113 printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
114}
115
116
117void rtas_fw_restart(char *cmd)
118{
119 if (rtas_firmware_flash_list.next)
120 rtas_flash_firmware();
121 rtas_restart(cmd);
122}
123
124void rtas_fw_power_off(void)
125{
126 if (rtas_firmware_flash_list.next)
127 rtas_flash_bypass_warning();
128 rtas_power_off();
129}
130
131void rtas_fw_halt(void)
132{
133 if (rtas_firmware_flash_list.next)
134 rtas_flash_bypass_warning();
135 rtas_halt();
136}
137
138EXPORT_SYMBOL(rtas_firmware_flash_list);
diff --git a/arch/powerpc/platforms/pseries/rtas-fw.h b/arch/powerpc/platforms/pseries/rtas-fw.h
new file mode 100644
index 000000000000..e70fa69974a3
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/rtas-fw.h
@@ -0,0 +1,3 @@
1void rtas_fw_restart(char *cmd);
2void rtas_fw_power_off(void);
3void rtas_fw_halt(void);
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/powerpc/platforms/pseries/setup.c
index 3009701eb90d..10cb0f2d9b5b 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/arch/ppc/kernel/setup.c 2 * 64-bit pSeries and RS/6000 setup code.
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 * Adapted from 'alpha' version by Gary Thomas 5 * Adapted from 'alpha' version by Gary Thomas
@@ -59,13 +59,15 @@
59#include <asm/time.h> 59#include <asm/time.h>
60#include <asm/nvram.h> 60#include <asm/nvram.h>
61#include <asm/plpar_wrappers.h> 61#include <asm/plpar_wrappers.h>
62#include <asm/xics.h> 62#include "xics.h"
63#include <asm/firmware.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h> 64#include <asm/pmc.h>
65#include <asm/mpic.h>
66#include <asm/ppc-pci.h>
67#include <asm/i8259.h>
68#include <asm/udbg.h>
65 69
66#include "i8259.h" 70#include "rtas-fw.h"
67#include "mpic.h"
68#include "pci.h"
69 71
70#ifdef DEBUG 72#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt) 73#define DBG(fmt...) udbg_printf(fmt)
@@ -84,13 +86,12 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
84extern void pSeries_system_reset_exception(struct pt_regs *regs); 86extern void pSeries_system_reset_exception(struct pt_regs *regs);
85extern int pSeries_machine_check_exception(struct pt_regs *regs); 87extern int pSeries_machine_check_exception(struct pt_regs *regs);
86 88
87static int pseries_shared_idle(void); 89static void pseries_shared_idle(void);
88static int pseries_dedicated_idle(void); 90static void pseries_dedicated_idle(void);
89 91
90static volatile void __iomem * chrp_int_ack_special;
91struct mpic *pSeries_mpic; 92struct mpic *pSeries_mpic;
92 93
93void pSeries_get_cpuinfo(struct seq_file *m) 94void pSeries_show_cpuinfo(struct seq_file *m)
94{ 95{
95 struct device_node *root; 96 struct device_node *root;
96 const char *model = ""; 97 const char *model = "";
@@ -119,19 +120,11 @@ static void __init fwnmi_init(void)
119 fwnmi_active = 1; 120 fwnmi_active = 1;
120} 121}
121 122
122static int pSeries_irq_cascade(struct pt_regs *regs, void *data)
123{
124 if (chrp_int_ack_special)
125 return readb(chrp_int_ack_special);
126 else
127 return i8259_irq(smp_processor_id());
128}
129
130static void __init pSeries_init_mpic(void) 123static void __init pSeries_init_mpic(void)
131{ 124{
132 unsigned int *addrp; 125 unsigned int *addrp;
133 struct device_node *np; 126 struct device_node *np;
134 int i; 127 unsigned long intack = 0;
135 128
136 /* All ISUs are setup, complete initialization */ 129 /* All ISUs are setup, complete initialization */
137 mpic_init(pSeries_mpic); 130 mpic_init(pSeries_mpic);
@@ -142,16 +135,14 @@ static void __init pSeries_init_mpic(void)
142 get_property(np, "8259-interrupt-acknowledge", NULL))) 135 get_property(np, "8259-interrupt-acknowledge", NULL)))
143 printk(KERN_ERR "Cannot find pci to get ack address\n"); 136 printk(KERN_ERR "Cannot find pci to get ack address\n");
144 else 137 else
145 chrp_int_ack_special = ioremap(addrp[prom_n_addr_cells(np)-1], 1); 138 intack = addrp[prom_n_addr_cells(np)-1];
146 of_node_put(np); 139 of_node_put(np);
147 140
148 /* Setup the legacy interrupts & controller */ 141 /* Setup the legacy interrupts & controller */
149 for (i = 0; i < NUM_ISA_INTERRUPTS; i++) 142 i8259_init(intack, 0);
150 irq_desc[i].handler = &i8259_pic;
151 i8259_init(0);
152 143
153 /* Hook cascade to mpic */ 144 /* Hook cascade to mpic */
154 mpic_setup_cascade(NUM_ISA_INTERRUPTS, pSeries_irq_cascade, NULL); 145 mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
155} 146}
156 147
157static void __init pSeries_setup_mpic(void) 148static void __init pSeries_setup_mpic(void)
@@ -241,10 +232,6 @@ static void __init pSeries_setup_arch(void)
241 find_and_init_phbs(); 232 find_and_init_phbs();
242 eeh_init(); 233 eeh_init();
243 234
244#ifdef CONFIG_DUMMY_CONSOLE
245 conswitchp = &dummy_con;
246#endif
247
248 pSeries_nvram_init(); 235 pSeries_nvram_init();
249 236
250 /* Choose an idle loop */ 237 /* Choose an idle loop */
@@ -488,8 +475,8 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
488 } 475 }
489} 476}
490 477
491static int pseries_dedicated_idle(void) 478static void pseries_dedicated_idle(void)
492{ 479{
493 long oldval; 480 long oldval;
494 struct paca_struct *lpaca = get_paca(); 481 struct paca_struct *lpaca = get_paca();
495 unsigned int cpu = smp_processor_id(); 482 unsigned int cpu = smp_processor_id();
@@ -544,7 +531,7 @@ static int pseries_dedicated_idle(void)
544 } 531 }
545} 532}
546 533
547static int pseries_shared_idle(void) 534static void pseries_shared_idle(void)
548{ 535{
549 struct paca_struct *lpaca = get_paca(); 536 struct paca_struct *lpaca = get_paca();
550 unsigned int cpu = smp_processor_id(); 537 unsigned int cpu = smp_processor_id();
@@ -586,8 +573,6 @@ static int pseries_shared_idle(void)
586 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 573 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
587 cpu_die(); 574 cpu_die();
588 } 575 }
589
590 return 0;
591} 576}
592 577
593static int pSeries_pci_probe_mode(struct pci_bus *bus) 578static int pSeries_pci_probe_mode(struct pci_bus *bus)
@@ -601,14 +586,14 @@ struct machdep_calls __initdata pSeries_md = {
601 .probe = pSeries_probe, 586 .probe = pSeries_probe,
602 .setup_arch = pSeries_setup_arch, 587 .setup_arch = pSeries_setup_arch,
603 .init_early = pSeries_init_early, 588 .init_early = pSeries_init_early,
604 .get_cpuinfo = pSeries_get_cpuinfo, 589 .show_cpuinfo = pSeries_show_cpuinfo,
605 .log_error = pSeries_log_error, 590 .log_error = pSeries_log_error,
606 .pcibios_fixup = pSeries_final_fixup, 591 .pcibios_fixup = pSeries_final_fixup,
607 .pci_probe_mode = pSeries_pci_probe_mode, 592 .pci_probe_mode = pSeries_pci_probe_mode,
608 .irq_bus_setup = pSeries_irq_bus_setup, 593 .irq_bus_setup = pSeries_irq_bus_setup,
609 .restart = rtas_restart, 594 .restart = rtas_fw_restart,
610 .power_off = rtas_power_off, 595 .power_off = rtas_fw_power_off,
611 .halt = rtas_halt, 596 .halt = rtas_fw_halt,
612 .panic = rtas_os_term, 597 .panic = rtas_os_term,
613 .cpu_die = pSeries_mach_cpu_die, 598 .cpu_die = pSeries_mach_cpu_die,
614 .get_boot_time = rtas_get_boot_time, 599 .get_boot_time = rtas_get_boot_time,
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/powerpc/platforms/pseries/smp.c
index d2c7e2c4733b..9c9458ddfc25 100644
--- a/arch/ppc64/kernel/pSeries_smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * SMP support for pSeries and BPA machines. 2 * SMP support for pSeries machines.
3 * 3 *
4 * Dave Engebretsen, Peter Bergner, and 4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com 5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
@@ -39,16 +39,14 @@
39#include <asm/paca.h> 39#include <asm/paca.h>
40#include <asm/time.h> 40#include <asm/time.h>
41#include <asm/machdep.h> 41#include <asm/machdep.h>
42#include <asm/xics.h> 42#include "xics.h"
43#include <asm/cputable.h> 43#include <asm/cputable.h>
44#include <asm/firmware.h> 44#include <asm/firmware.h>
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/rtas.h> 46#include <asm/rtas.h>
47#include <asm/plpar_wrappers.h> 47#include <asm/plpar_wrappers.h>
48#include <asm/pSeries_reconfig.h> 48#include <asm/pSeries_reconfig.h>
49 49#include <asm/mpic.h>
50#include "mpic.h"
51#include "bpa_iic.h"
52 50
53#ifdef DEBUG 51#ifdef DEBUG
54#define DBG(fmt...) udbg_printf(fmt) 52#define DBG(fmt...) udbg_printf(fmt)
@@ -343,36 +341,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
343 341
344} 342}
345#endif /* CONFIG_XICS */ 343#endif /* CONFIG_XICS */
346#ifdef CONFIG_BPA_IIC
347static void smp_iic_message_pass(int target, int msg)
348{
349 unsigned int i;
350
351 if (target < NR_CPUS) {
352 iic_cause_IPI(target, msg);
353 } else {
354 for_each_online_cpu(i) {
355 if (target == MSG_ALL_BUT_SELF
356 && i == smp_processor_id())
357 continue;
358 iic_cause_IPI(i, msg);
359 }
360 }
361}
362
363static int __init smp_iic_probe(void)
364{
365 iic_request_IPIs();
366
367 return cpus_weight(cpu_possible_map);
368}
369
370static void __devinit smp_iic_setup_cpu(int cpu)
371{
372 if (cpu != boot_cpuid)
373 iic_setup_cpu();
374}
375#endif /* CONFIG_BPA_IIC */
376 344
377static DEFINE_SPINLOCK(timebase_lock); 345static DEFINE_SPINLOCK(timebase_lock);
378static unsigned long timebase = 0; 346static unsigned long timebase = 0;
@@ -444,15 +412,6 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
444 .cpu_bootable = smp_pSeries_cpu_bootable, 412 .cpu_bootable = smp_pSeries_cpu_bootable,
445}; 413};
446#endif 414#endif
447#ifdef CONFIG_BPA_IIC
448static struct smp_ops_t bpa_iic_smp_ops = {
449 .message_pass = smp_iic_message_pass,
450 .probe = smp_iic_probe,
451 .kick_cpu = smp_pSeries_kick_cpu,
452 .setup_cpu = smp_iic_setup_cpu,
453 .cpu_bootable = smp_pSeries_cpu_bootable,
454};
455#endif
456 415
457/* This is called very early */ 416/* This is called very early */
458void __init smp_init_pSeries(void) 417void __init smp_init_pSeries(void)
@@ -472,11 +431,6 @@ void __init smp_init_pSeries(void)
472 smp_ops = &pSeries_xics_smp_ops; 431 smp_ops = &pSeries_xics_smp_ops;
473 break; 432 break;
474#endif 433#endif
475#ifdef CONFIG_BPA_IIC
476 case IC_BPA_IIC:
477 smp_ops = &bpa_iic_smp_ops;
478 break;
479#endif
480 default: 434 default:
481 panic("Invalid interrupt controller"); 435 panic("Invalid interrupt controller");
482 } 436 }
diff --git a/arch/ppc64/kernel/pSeries_vio.c b/arch/powerpc/platforms/pseries/vio.c
index e0ae06f58f86..866379b80c09 100644
--- a/arch/ppc64/kernel/pSeries_vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -22,6 +22,7 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/vio.h> 23#include <asm/vio.h>
24#include <asm/hvcall.h> 24#include <asm/hvcall.h>
25#include <asm/tce.h>
25 26
26extern struct subsystem devices_subsys; /* needed for vio_find_name() */ 27extern struct subsystem devices_subsys; /* needed for vio_find_name() */
27 28
diff --git a/arch/ppc64/kernel/xics.c b/arch/powerpc/platforms/pseries/xics.c
index daf93885dcfa..c72c86f05cb6 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/xics.c 2 * arch/powerpc/platforms/pseries/xics.c
3 * 3 *
4 * Copyright 2000 IBM Corporation. 4 * Copyright 2000 IBM Corporation.
5 * 5 *
@@ -25,11 +25,11 @@
25#include <asm/pgtable.h> 25#include <asm/pgtable.h>
26#include <asm/smp.h> 26#include <asm/smp.h>
27#include <asm/rtas.h> 27#include <asm/rtas.h>
28#include <asm/xics.h>
29#include <asm/hvcall.h> 28#include <asm/hvcall.h>
30#include <asm/machdep.h> 29#include <asm/machdep.h>
30#include <asm/i8259.h>
31 31
32#include "i8259.h" 32#include "xics.h"
33 33
34static unsigned int xics_startup(unsigned int irq); 34static unsigned int xics_startup(unsigned int irq);
35static void xics_enable_irq(unsigned int irq); 35static void xics_enable_irq(unsigned int irq);
@@ -62,7 +62,7 @@ static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
62/* Want a priority other than 0. Various HW issues require this. */ 62/* Want a priority other than 0. Various HW issues require this. */
63#define DEFAULT_PRIORITY 5 63#define DEFAULT_PRIORITY 5
64 64
65/* 65/*
66 * Mark IPIs as higher priority so we can take them inside interrupts that 66 * Mark IPIs as higher priority so we can take them inside interrupts that
67 * arent marked SA_INTERRUPT 67 * arent marked SA_INTERRUPT
68 */ 68 */
@@ -169,11 +169,11 @@ static inline long plpar_xirr(unsigned long *xirr_ret)
169static int pSeriesLP_xirr_info_get(int n_cpu) 169static int pSeriesLP_xirr_info_get(int n_cpu)
170{ 170{
171 unsigned long lpar_rc; 171 unsigned long lpar_rc;
172 unsigned long return_value; 172 unsigned long return_value;
173 173
174 lpar_rc = plpar_xirr(&return_value); 174 lpar_rc = plpar_xirr(&return_value);
175 if (lpar_rc != H_Success) 175 if (lpar_rc != H_Success)
176 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 176 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
177 return (int)return_value; 177 return (int)return_value;
178} 178}
179 179
@@ -185,7 +185,7 @@ static void pSeriesLP_xirr_info_set(int n_cpu, int value)
185 lpar_rc = plpar_eoi(val64); 185 lpar_rc = plpar_eoi(val64);
186 if (lpar_rc != H_Success) 186 if (lpar_rc != H_Success)
187 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc, 187 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
188 val64); 188 val64);
189} 189}
190 190
191void pSeriesLP_cppr_info(int n_cpu, u8 value) 191void pSeriesLP_cppr_info(int n_cpu, u8 value)
@@ -194,7 +194,7 @@ void pSeriesLP_cppr_info(int n_cpu, u8 value)
194 194
195 lpar_rc = plpar_cppr(value); 195 lpar_rc = plpar_cppr(value);
196 if (lpar_rc != H_Success) 196 if (lpar_rc != H_Success)
197 panic("bad return code cppr - rc = %lx\n", lpar_rc); 197 panic("bad return code cppr - rc = %lx\n", lpar_rc);
198} 198}
199 199
200static void pSeriesLP_qirr_info(int n_cpu , u8 value) 200static void pSeriesLP_qirr_info(int n_cpu , u8 value)
@@ -203,7 +203,7 @@ static void pSeriesLP_qirr_info(int n_cpu , u8 value)
203 203
204 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); 204 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
205 if (lpar_rc != H_Success) 205 if (lpar_rc != H_Success)
206 panic("bad return code qirr - rc = %lx\n", lpar_rc); 206 panic("bad return code qirr - rc = %lx\n", lpar_rc);
207} 207}
208 208
209xics_ops pSeriesLP_ops = { 209xics_ops pSeriesLP_ops = {
@@ -366,7 +366,7 @@ int xics_get_irq(struct pt_regs *regs)
366 366
367 /* for sanity, this had better be < NR_IRQS - 16 */ 367 /* for sanity, this had better be < NR_IRQS - 16 */
368 if (vec == xics_irq_8259_cascade_real) { 368 if (vec == xics_irq_8259_cascade_real) {
369 irq = i8259_irq(cpu); 369 irq = i8259_irq(regs);
370 if (irq == -1) { 370 if (irq == -1) {
371 /* Spurious cascaded interrupt. Still must ack xics */ 371 /* Spurious cascaded interrupt. Still must ack xics */
372 xics_end_irq(irq_offset_up(xics_irq_8259_cascade)); 372 xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
@@ -462,7 +462,7 @@ void xics_init_IRQ(void)
462 struct xics_interrupt_node { 462 struct xics_interrupt_node {
463 unsigned long addr; 463 unsigned long addr;
464 unsigned long size; 464 unsigned long size;
465 } intnodes[NR_CPUS]; 465 } intnodes[NR_CPUS];
466 466
467 ppc64_boot_msg(0x20, "XICS Init"); 467 ppc64_boot_msg(0x20, "XICS Init");
468 468
@@ -487,7 +487,7 @@ nextnode:
487 ireg = (uint *)get_property(np, "reg", &ilen); 487 ireg = (uint *)get_property(np, "reg", &ilen);
488 if (!ireg) 488 if (!ireg)
489 panic("xics_init_IRQ: can't find interrupt reg property"); 489 panic("xics_init_IRQ: can't find interrupt reg property");
490 490
491 while (ilen) { 491 while (ilen) {
492 intnodes[indx].addr = (unsigned long)*ireg++ << 32; 492 intnodes[indx].addr = (unsigned long)*ireg++ << 32;
493 ilen -= sizeof(uint); 493 ilen -= sizeof(uint);
@@ -555,7 +555,7 @@ nextnode:
555 continue; 555 continue;
556 556
557 hard_id = get_hard_smp_processor_id(i); 557 hard_id = get_hard_smp_processor_id(i);
558 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr, 558 xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
559 intnodes[hard_id].size); 559 intnodes[hard_id].size);
560 } 560 }
561#else 561#else
@@ -589,7 +589,7 @@ static int __init xics_setup_i8259(void)
589 no_action, 0, "8259 cascade", NULL)) 589 no_action, 0, "8259 cascade", NULL))
590 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 " 590 printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
591 "cascade\n"); 591 "cascade\n");
592 i8259_init(0); 592 i8259_init(0, 0);
593 } 593 }
594 return 0; 594 return 0;
595} 595}
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
new file mode 100644
index 000000000000..e14c70868f1d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -0,0 +1,34 @@
1/*
2 * arch/powerpc/platforms/pseries/xics.h
3 *
4 * Copyright 2000 IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _POWERPC_KERNEL_XICS_H
13#define _POWERPC_KERNEL_XICS_H
14
15#include <linux/cache.h>
16
17void xics_init_IRQ(void);
18int xics_get_irq(struct pt_regs *);
19void xics_setup_cpu(void);
20void xics_teardown_cpu(int secondary);
21void xics_cause_IPI(int cpu);
22void xics_request_IPIs(void);
23void xics_migrate_irqs_away(void);
24
25/* first argument is ignored for now*/
26void pSeriesLP_cppr_info(int n_cpu, u8 value);
27
28struct xics_ipi_struct {
29 volatile unsigned long value;
30} ____cacheline_aligned;
31
32extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
33
34#endif /* _POWERPC_KERNEL_XICS_H */
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
new file mode 100644
index 000000000000..8acd21dee05d
--- /dev/null
+++ b/arch/powerpc/sysdev/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_MPIC) += mpic.o
2obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
3obj-$(CONFIG_PPC_I8259) += i8259.o
4obj-$(CONFIG_PPC_MPC106) += grackle.o
5obj-$(CONFIG_BOOKE) += dcr.o
6obj-$(CONFIG_40x) += dcr.o
7obj-$(CONFIG_U3_DART) += u3_iommu.o
diff --git a/arch/ppc/syslib/dcr.S b/arch/powerpc/sysdev/dcr.S
index 895f10243a43..895f10243a43 100644
--- a/arch/ppc/syslib/dcr.S
+++ b/arch/powerpc/sysdev/dcr.S
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
new file mode 100644
index 000000000000..b6ec793a23be
--- /dev/null
+++ b/arch/powerpc/sysdev/grackle.c
@@ -0,0 +1,64 @@
1/*
2 * Functions for setting up and using a MPC106 northbridge
3 * Extracted from arch/powerpc/platforms/powermac/pci.c.
4 *
5 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
6 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <linux/kernel.h>
14#include <linux/pci.h>
15#include <linux/init.h>
16
17#include <asm/io.h>
18#include <asm/prom.h>
19#include <asm/pci-bridge.h>
20#include <asm/grackle.h>
21
22#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
23 | (((o) & ~3) << 24))
24
25#define GRACKLE_PICR1_STG 0x00000040
26#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
27
28/* N.B. this is called before bridges is initialized, so we can't
29 use grackle_pcibios_{read,write}_config_dword. */
30static inline void grackle_set_stg(struct pci_controller* bp, int enable)
31{
32 unsigned int val;
33
34 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
35 val = in_le32(bp->cfg_data);
36 val = enable? (val | GRACKLE_PICR1_STG) :
37 (val & ~GRACKLE_PICR1_STG);
38 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
39 out_le32(bp->cfg_data, val);
40 (void)in_le32(bp->cfg_data);
41}
42
43static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
44{
45 unsigned int val;
46
47 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
48 val = in_le32(bp->cfg_data);
49 val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
50 (val & ~GRACKLE_PICR1_LOOPSNOOP);
51 out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
52 out_le32(bp->cfg_data, val);
53 (void)in_le32(bp->cfg_data);
54}
55
56void __init setup_grackle(struct pci_controller *hose)
57{
58 setup_indirect_pci(hose, 0xfec00000, 0xfee00000);
59 if (machine_is_compatible("AAPL,PowerBook1998"))
60 grackle_set_loop_snoop(hose, 1);
61#if 0 /* Disabled for now, HW problems ??? */
62 grackle_set_stg(hose, 1);
63#endif
64}
diff --git a/arch/ppc/syslib/i8259.c b/arch/powerpc/sysdev/i8259.c
index 5c7908c20e43..90bce6e0c191 100644
--- a/arch/ppc/syslib/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -1,18 +1,26 @@
1/*
2 * i8259 interrupt controller driver.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
1#include <linux/init.h> 9#include <linux/init.h>
2#include <linux/ioport.h> 10#include <linux/ioport.h>
3#include <linux/interrupt.h> 11#include <linux/interrupt.h>
4#include <asm/io.h> 12#include <asm/io.h>
5#include <asm/i8259.h> 13#include <asm/i8259.h>
6 14
7static volatile unsigned char *pci_intack; /* RO, gives us the irq vector */ 15static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
8 16
9unsigned char cached_8259[2] = { 0xff, 0xff }; 17static unsigned char cached_8259[2] = { 0xff, 0xff };
10#define cached_A1 (cached_8259[0]) 18#define cached_A1 (cached_8259[0])
11#define cached_21 (cached_8259[1]) 19#define cached_21 (cached_8259[1])
12 20
13static DEFINE_SPINLOCK(i8259_lock); 21static DEFINE_SPINLOCK(i8259_lock);
14 22
15int i8259_pic_irq_offset; 23static int i8259_pic_irq_offset;
16 24
17/* 25/*
18 * Acknowledge the IRQ using either the PCI host bridge's interrupt 26 * Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -20,8 +28,7 @@ int i8259_pic_irq_offset;
20 * which is called. It should be noted that polling is broken on some 28 * which is called. It should be noted that polling is broken on some
21 * IBM and Motorola PReP boxes so we must use the int-ack feature on them. 29 * IBM and Motorola PReP boxes so we must use the int-ack feature on them.
22 */ 30 */
23int 31int i8259_irq(struct pt_regs *regs)
24i8259_irq(struct pt_regs *regs)
25{ 32{
26 int irq; 33 int irq;
27 34
@@ -29,7 +36,7 @@ i8259_irq(struct pt_regs *regs)
29 36
30 /* Either int-ack or poll for the IRQ */ 37 /* Either int-ack or poll for the IRQ */
31 if (pci_intack) 38 if (pci_intack)
32 irq = *pci_intack; 39 irq = readb(pci_intack);
33 else { 40 else {
34 /* Perform an interrupt acknowledge cycle on controller 1. */ 41 /* Perform an interrupt acknowledge cycle on controller 1. */
35 outb(0x0C, 0x20); /* prepare for poll */ 42 outb(0x0C, 0x20); /* prepare for poll */
@@ -59,7 +66,12 @@ i8259_irq(struct pt_regs *regs)
59 } 66 }
60 67
61 spin_unlock(&i8259_lock); 68 spin_unlock(&i8259_lock);
62 return irq; 69 return irq + i8259_pic_irq_offset;
70}
71
72int i8259_irq_cascade(struct pt_regs *regs, void *unused)
73{
74 return i8259_irq(regs);
63} 75}
64 76
65static void i8259_mask_and_ack_irq(unsigned int irq_nr) 77static void i8259_mask_and_ack_irq(unsigned int irq_nr)
@@ -67,20 +79,18 @@ static void i8259_mask_and_ack_irq(unsigned int irq_nr)
67 unsigned long flags; 79 unsigned long flags;
68 80
69 spin_lock_irqsave(&i8259_lock, flags); 81 spin_lock_irqsave(&i8259_lock, flags);
70 if ( irq_nr >= i8259_pic_irq_offset ) 82 irq_nr -= i8259_pic_irq_offset;
71 irq_nr -= i8259_pic_irq_offset;
72
73 if (irq_nr > 7) { 83 if (irq_nr > 7) {
74 cached_A1 |= 1 << (irq_nr-8); 84 cached_A1 |= 1 << (irq_nr-8);
75 inb(0xA1); /* DUMMY */ 85 inb(0xA1); /* DUMMY */
76 outb(cached_A1,0xA1); 86 outb(cached_A1, 0xA1);
77 outb(0x20,0xA0); /* Non-specific EOI */ 87 outb(0x20, 0xA0); /* Non-specific EOI */
78 outb(0x20,0x20); /* Non-specific EOI to cascade */ 88 outb(0x20, 0x20); /* Non-specific EOI to cascade */
79 } else { 89 } else {
80 cached_21 |= 1 << irq_nr; 90 cached_21 |= 1 << irq_nr;
81 inb(0x21); /* DUMMY */ 91 inb(0x21); /* DUMMY */
82 outb(cached_21,0x21); 92 outb(cached_21, 0x21);
83 outb(0x20,0x20); /* Non-specific EOI */ 93 outb(0x20, 0x20); /* Non-specific EOI */
84 } 94 }
85 spin_unlock_irqrestore(&i8259_lock, flags); 95 spin_unlock_irqrestore(&i8259_lock, flags);
86} 96}
@@ -96,9 +106,8 @@ static void i8259_mask_irq(unsigned int irq_nr)
96 unsigned long flags; 106 unsigned long flags;
97 107
98 spin_lock_irqsave(&i8259_lock, flags); 108 spin_lock_irqsave(&i8259_lock, flags);
99 if ( irq_nr >= i8259_pic_irq_offset ) 109 irq_nr -= i8259_pic_irq_offset;
100 irq_nr -= i8259_pic_irq_offset; 110 if (irq_nr < 8)
101 if ( irq_nr < 8 )
102 cached_21 |= 1 << irq_nr; 111 cached_21 |= 1 << irq_nr;
103 else 112 else
104 cached_A1 |= 1 << (irq_nr-8); 113 cached_A1 |= 1 << (irq_nr-8);
@@ -111,9 +120,8 @@ static void i8259_unmask_irq(unsigned int irq_nr)
111 unsigned long flags; 120 unsigned long flags;
112 121
113 spin_lock_irqsave(&i8259_lock, flags); 122 spin_lock_irqsave(&i8259_lock, flags);
114 if ( irq_nr >= i8259_pic_irq_offset ) 123 irq_nr -= i8259_pic_irq_offset;
115 irq_nr -= i8259_pic_irq_offset; 124 if (irq_nr < 8)
116 if ( irq_nr < 8 )
117 cached_21 &= ~(1 << irq_nr); 125 cached_21 &= ~(1 << irq_nr);
118 else 126 else
119 cached_A1 &= ~(1 << (irq_nr-8)); 127 cached_A1 &= ~(1 << (irq_nr-8));
@@ -169,12 +177,14 @@ static struct irqaction i8259_irqaction = {
169 * intack_addr - PCI interrupt acknowledge (real) address which will return 177 * intack_addr - PCI interrupt acknowledge (real) address which will return
170 * the active irq from the 8259 178 * the active irq from the 8259
171 */ 179 */
172void __init 180void __init i8259_init(unsigned long intack_addr, int offset)
173i8259_init(long intack_addr)
174{ 181{
175 unsigned long flags; 182 unsigned long flags;
183 int i;
176 184
177 spin_lock_irqsave(&i8259_lock, flags); 185 spin_lock_irqsave(&i8259_lock, flags);
186 i8259_pic_irq_offset = offset;
187
178 /* init master interrupt controller */ 188 /* init master interrupt controller */
179 outb(0x11, 0x20); /* Start init sequence */ 189 outb(0x11, 0x20); /* Start init sequence */
180 outb(0x00, 0x21); /* Vector base */ 190 outb(0x00, 0x21); /* Vector base */
@@ -198,11 +208,14 @@ i8259_init(long intack_addr)
198 spin_unlock_irqrestore(&i8259_lock, flags); 208 spin_unlock_irqrestore(&i8259_lock, flags);
199 209
200 /* reserve our resources */ 210 /* reserve our resources */
201 setup_irq( i8259_pic_irq_offset + 2, &i8259_irqaction); 211 setup_irq(offset + 2, &i8259_irqaction);
202 request_resource(&ioport_resource, &pic1_iores); 212 request_resource(&ioport_resource, &pic1_iores);
203 request_resource(&ioport_resource, &pic2_iores); 213 request_resource(&ioport_resource, &pic2_iores);
204 request_resource(&ioport_resource, &pic_edgectrl_iores); 214 request_resource(&ioport_resource, &pic_edgectrl_iores);
205 215
206 if (intack_addr != 0) 216 if (intack_addr != 0)
207 pci_intack = ioremap(intack_addr, 1); 217 pci_intack = ioremap(intack_addr, 1);
218
219 for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
220 irq_desc[offset + i].handler = &i8259_pic;
208} 221}
diff --git a/arch/ppc/syslib/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index e71488469704..e71488469704 100644
--- a/arch/ppc/syslib/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
diff --git a/arch/ppc64/kernel/mpic.c b/arch/powerpc/sysdev/mpic.c
index 5f5bc73754d9..105f05341a41 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/ppc64/kernel/mpic.c 2 * arch/powerpc/kernel/mpic.c
3 * 3 *
4 * Driver for interrupt controllers following the OpenPIC standard, the 4 * Driver for interrupt controllers following the OpenPIC standard, the
5 * common implementation beeing IBM's MPIC. This driver also can deal 5 * common implementation beeing IBM's MPIC. This driver also can deal
@@ -31,8 +31,8 @@
31#include <asm/pgtable.h> 31#include <asm/pgtable.h>
32#include <asm/irq.h> 32#include <asm/irq.h>
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34 34#include <asm/mpic.h>
35#include "mpic.h" 35#include <asm/smp.h>
36 36
37#ifdef DEBUG 37#ifdef DEBUG
38#define DBG(fmt...) printk(fmt) 38#define DBG(fmt...) printk(fmt)
@@ -44,6 +44,9 @@ static struct mpic *mpics;
44static struct mpic *mpic_primary; 44static struct mpic *mpic_primary;
45static DEFINE_SPINLOCK(mpic_lock); 45static DEFINE_SPINLOCK(mpic_lock);
46 46
47#ifdef CONFIG_PPC32 /* XXX for now */
48#define distribute_irqs CONFIG_IRQ_ALL_CPUS
49#endif
47 50
48/* 51/*
49 * Register accessor functions 52 * Register accessor functions
@@ -355,7 +358,7 @@ static void mpic_enable_irq(unsigned int irq)
355 struct mpic *mpic = mpic_from_irq(irq); 358 struct mpic *mpic = mpic_from_irq(irq);
356 unsigned int src = irq - mpic->irq_offset; 359 unsigned int src = irq - mpic->irq_offset;
357 360
358 DBG("%s: enable_irq: %d (src %d)\n", mpic->name, irq, src); 361 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src);
359 362
360 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 363 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI,
361 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK); 364 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & ~MPIC_VECPRI_MASK);
@@ -480,6 +483,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
480 if (mpic == NULL) 483 if (mpic == NULL)
481 return NULL; 484 return NULL;
482 485
486
483 memset(mpic, 0, sizeof(struct mpic)); 487 memset(mpic, 0, sizeof(struct mpic));
484 mpic->name = name; 488 mpic->name = name;
485 489
@@ -506,7 +510,7 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr,
506 mpic->senses_count = senses_count; 510 mpic->senses_count = senses_count;
507 511
508 /* Map the global registers */ 512 /* Map the global registers */
509 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x2000); 513 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
510 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); 514 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
511 BUG_ON(mpic->gregs == NULL); 515 BUG_ON(mpic->gregs == NULL);
512 516
@@ -644,7 +648,6 @@ void __init mpic_init(struct mpic *mpic)
644 continue; 648 continue;
645 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; 649 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
646 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi; 650 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
647
648#endif /* CONFIG_SMP */ 651#endif /* CONFIG_SMP */
649 } 652 }
650 653
@@ -700,7 +703,7 @@ void __init mpic_init(struct mpic *mpic)
700 /* init hw */ 703 /* init hw */
701 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 704 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
702 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 705 mpic_irq_write(i, MPIC_IRQ_DESTINATION,
703 1 << get_hard_smp_processor_id(boot_cpuid)); 706 1 << hard_smp_processor_id());
704 707
705 /* init linux descriptors */ 708 /* init linux descriptors */
706 if (i < mpic->irq_count) { 709 if (i < mpic->irq_count) {
@@ -792,6 +795,21 @@ void mpic_setup_this_cpu(void)
792#endif /* CONFIG_SMP */ 795#endif /* CONFIG_SMP */
793} 796}
794 797
798int mpic_cpu_get_priority(void)
799{
800 struct mpic *mpic = mpic_primary;
801
802 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
803}
804
805void mpic_cpu_set_priority(int prio)
806{
807 struct mpic *mpic = mpic_primary;
808
809 prio &= MPIC_CPU_TASKPRI_MASK;
810 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
811}
812
795/* 813/*
796 * XXX: someone who knows mpic should check this. 814 * XXX: someone who knows mpic should check this.
797 * do we need to eoi the ipi including for kexec cpu here (see xics comments)? 815 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
@@ -885,4 +903,25 @@ void mpic_request_ipis(void)
885 903
886 printk("IPIs requested... \n"); 904 printk("IPIs requested... \n");
887} 905}
906
907void smp_mpic_message_pass(int target, int msg)
908{
909 /* make sure we're sending something that translates to an IPI */
910 if ((unsigned int)msg > 3) {
911 printk("SMP %d: smp_message_pass: unknown msg %d\n",
912 smp_processor_id(), msg);
913 return;
914 }
915 switch (target) {
916 case MSG_ALL:
917 mpic_send_ipi(msg, 0xffffffff);
918 break;
919 case MSG_ALL_BUT_SELF:
920 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
921 break;
922 default:
923 mpic_send_ipi(msg, 1 << target);
924 break;
925 }
926}
888#endif /* CONFIG_SMP */ 927#endif /* CONFIG_SMP */
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 41ea09cb9ac7..fba871a1bda5 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -44,39 +44,11 @@
44#include <asm/abs_addr.h> 44#include <asm/abs_addr.h>
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46#include <asm/lmb.h> 46#include <asm/lmb.h>
47 47#include <asm/dart.h>
48#include "pci.h" 48#include <asm/ppc-pci.h>
49 49
50extern int iommu_force_on; 50extern int iommu_force_on;
51 51
52/* physical base of DART registers */
53#define DART_BASE 0xf8033000UL
54
55/* Offset from base to control register */
56#define DARTCNTL 0
57/* Offset from base to exception register */
58#define DARTEXCP 0x10
59/* Offset from base to TLB tag registers */
60#define DARTTAG 0x1000
61
62
63/* Control Register fields */
64
65/* base address of table (pfn) */
66#define DARTCNTL_BASE_MASK 0xfffff
67#define DARTCNTL_BASE_SHIFT 12
68
69#define DARTCNTL_FLUSHTLB 0x400
70#define DARTCNTL_ENABLE 0x200
71
72/* size of table in pages */
73#define DARTCNTL_SIZE_MASK 0x1ff
74#define DARTCNTL_SIZE_SHIFT 0
75
76/* DART table fields */
77#define DARTMAP_VALID 0x80000000
78#define DARTMAP_RPNMASK 0x00ffffff
79
80/* Physical base address and size of the DART table */ 52/* Physical base address and size of the DART table */
81unsigned long dart_tablebase; /* exported to htab_initialize */ 53unsigned long dart_tablebase; /* exported to htab_initialize */
82static unsigned long dart_tablesize; 54static unsigned long dart_tablesize;
@@ -152,18 +124,21 @@ static void dart_build(struct iommu_table *tbl, long index,
152 124
153 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); 125 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
154 126
127 index <<= DART_PAGE_FACTOR;
128 npages <<= DART_PAGE_FACTOR;
129
155 dp = ((unsigned int*)tbl->it_base) + index; 130 dp = ((unsigned int*)tbl->it_base) + index;
156 131
157 /* On U3, all memory is contigous, so we can move this 132 /* On U3, all memory is contigous, so we can move this
158 * out of the loop. 133 * out of the loop.
159 */ 134 */
160 while (npages--) { 135 while (npages--) {
161 rpn = virt_to_abs(uaddr) >> PAGE_SHIFT; 136 rpn = virt_to_abs(uaddr) >> DART_PAGE_SHIFT;
162 137
163 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK); 138 *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
164 139
165 rpn++; 140 rpn++;
166 uaddr += PAGE_SIZE; 141 uaddr += DART_PAGE_SIZE;
167 } 142 }
168 143
169 dart_dirty = 1; 144 dart_dirty = 1;
@@ -181,6 +156,9 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
181 156
182 DBG("dart: free at: %lx, %lx\n", index, npages); 157 DBG("dart: free at: %lx, %lx\n", index, npages);
183 158
159 index <<= DART_PAGE_FACTOR;
160 npages <<= DART_PAGE_FACTOR;
161
184 dp = ((unsigned int *)tbl->it_base) + index; 162 dp = ((unsigned int *)tbl->it_base) + index;
185 163
186 while (npages--) 164 while (npages--)
@@ -209,10 +187,10 @@ static int dart_init(struct device_node *dart_node)
209 * that to work around what looks like a problem with the HT bridge 187 * that to work around what looks like a problem with the HT bridge
210 * prefetching into invalid pages and corrupting data 188 * prefetching into invalid pages and corrupting data
211 */ 189 */
212 tmp = lmb_alloc(PAGE_SIZE, PAGE_SIZE); 190 tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
213 if (!tmp) 191 if (!tmp)
214 panic("U3-DART: Cannot allocate spare page!"); 192 panic("U3-DART: Cannot allocate spare page!");
215 dart_emptyval = DARTMAP_VALID | ((tmp >> PAGE_SHIFT) & DARTMAP_RPNMASK); 193 dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK);
216 194
217 /* Map in DART registers. FIXME: Use device node to get base address */ 195 /* Map in DART registers. FIXME: Use device node to get base address */
218 dart = ioremap(DART_BASE, 0x7000); 196 dart = ioremap(DART_BASE, 0x7000);
@@ -223,8 +201,8 @@ static int dart_init(struct device_node *dart_node)
223 * table size and enable bit 201 * table size and enable bit
224 */ 202 */
225 regword = DARTCNTL_ENABLE | 203 regword = DARTCNTL_ENABLE |
226 ((dart_tablebase >> PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) | 204 ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) |
227 (((dart_tablesize >> PAGE_SHIFT) & DARTCNTL_SIZE_MASK) 205 (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK)
228 << DARTCNTL_SIZE_SHIFT); 206 << DARTCNTL_SIZE_SHIFT);
229 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); 207 dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize);
230 208
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
new file mode 100644
index 000000000000..79a784f0e7a9
--- /dev/null
+++ b/arch/powerpc/xmon/Makefile
@@ -0,0 +1,11 @@
1# Makefile for xmon
2
3ifdef CONFIG_PPC64
4EXTRA_CFLAGS += -mno-minimal-toc
5endif
6
7obj-$(CONFIG_8xx) += start_8xx.o
8obj-$(CONFIG_6xx) += start_32.o
9obj-$(CONFIG_4xx) += start_32.o
10obj-$(CONFIG_PPC64) += start_64.o
11obj-y += xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
diff --git a/arch/ppc64/xmon/ansidecl.h b/arch/powerpc/xmon/ansidecl.h
index c9b9f0929e9e..c9b9f0929e9e 100644
--- a/arch/ppc64/xmon/ansidecl.h
+++ b/arch/powerpc/xmon/ansidecl.h
diff --git a/arch/ppc64/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 84211a21c6f4..84211a21c6f4 100644
--- a/arch/ppc64/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
diff --git a/arch/ppc64/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
index ac0a9d2427e0..ac0a9d2427e0 100644
--- a/arch/ppc64/xmon/ppc-dis.c
+++ b/arch/powerpc/xmon/ppc-dis.c
diff --git a/arch/ppc64/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index 5ee8fc32f824..5ee8fc32f824 100644
--- a/arch/ppc64/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
diff --git a/arch/ppc64/xmon/ppc.h b/arch/powerpc/xmon/ppc.h
index 342237e8dd69..342237e8dd69 100644
--- a/arch/ppc64/xmon/ppc.h
+++ b/arch/powerpc/xmon/ppc.h
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
new file mode 100644
index 000000000000..f8e40dfd2bff
--- /dev/null
+++ b/arch/powerpc/xmon/setjmp.S
@@ -0,0 +1,135 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * NOTE: assert(sizeof(buf) > 23 * sizeof(long))
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13#include <asm/asm-offsets.h>
14
15_GLOBAL(xmon_setjmp)
16 mflr r0
17 STL r0,0(r3)
18 STL r1,SZL(r3)
19 STL r2,2*SZL(r3)
20 mfcr r0
21 STL r0,3*SZL(r3)
22 STL r13,4*SZL(r3)
23 STL r14,5*SZL(r3)
24 STL r15,6*SZL(r3)
25 STL r16,7*SZL(r3)
26 STL r17,8*SZL(r3)
27 STL r18,9*SZL(r3)
28 STL r19,10*SZL(r3)
29 STL r20,11*SZL(r3)
30 STL r21,12*SZL(r3)
31 STL r22,13*SZL(r3)
32 STL r23,14*SZL(r3)
33 STL r24,15*SZL(r3)
34 STL r25,16*SZL(r3)
35 STL r26,17*SZL(r3)
36 STL r27,18*SZL(r3)
37 STL r28,19*SZL(r3)
38 STL r29,20*SZL(r3)
39 STL r30,21*SZL(r3)
40 STL r31,22*SZL(r3)
41 li r3,0
42 blr
43
44_GLOBAL(xmon_longjmp)
45 CMPI r4,0
46 bne 1f
47 li r4,1
481: LDL r13,4*SZL(r3)
49 LDL r14,5*SZL(r3)
50 LDL r15,6*SZL(r3)
51 LDL r16,7*SZL(r3)
52 LDL r17,8*SZL(r3)
53 LDL r18,9*SZL(r3)
54 LDL r19,10*SZL(r3)
55 LDL r20,11*SZL(r3)
56 LDL r21,12*SZL(r3)
57 LDL r22,13*SZL(r3)
58 LDL r23,14*SZL(r3)
59 LDL r24,15*SZL(r3)
60 LDL r25,16*SZL(r3)
61 LDL r26,17*SZL(r3)
62 LDL r27,18*SZL(r3)
63 LDL r28,19*SZL(r3)
64 LDL r29,20*SZL(r3)
65 LDL r30,21*SZL(r3)
66 LDL r31,22*SZL(r3)
67 LDL r0,3*SZL(r3)
68 mtcrf 0x38,r0
69 LDL r0,0(r3)
70 LDL r1,SZL(r3)
71 LDL r2,2*SZL(r3)
72 mtlr r0
73 mr r3,r4
74 blr
75
76/*
77 * Grab the register values as they are now.
78 * This won't do a particularily good job because we really
79 * want our caller's caller's registers, and our caller has
80 * already executed its prologue.
81 * ToDo: We could reach back into the caller's save area to do
82 * a better job of representing the caller's state (note that
83 * that will be different for 32-bit and 64-bit, because of the
84 * different ABIs, though).
85 */
86_GLOBAL(xmon_save_regs)
87 STL r0,0*SZL(r3)
88 STL r2,2*SZL(r3)
89 STL r3,3*SZL(r3)
90 STL r4,4*SZL(r3)
91 STL r5,5*SZL(r3)
92 STL r6,6*SZL(r3)
93 STL r7,7*SZL(r3)
94 STL r8,8*SZL(r3)
95 STL r9,9*SZL(r3)
96 STL r10,10*SZL(r3)
97 STL r11,11*SZL(r3)
98 STL r12,12*SZL(r3)
99 STL r13,13*SZL(r3)
100 STL r14,14*SZL(r3)
101 STL r15,15*SZL(r3)
102 STL r16,16*SZL(r3)
103 STL r17,17*SZL(r3)
104 STL r18,18*SZL(r3)
105 STL r19,19*SZL(r3)
106 STL r20,20*SZL(r3)
107 STL r21,21*SZL(r3)
108 STL r22,22*SZL(r3)
109 STL r23,23*SZL(r3)
110 STL r24,24*SZL(r3)
111 STL r25,25*SZL(r3)
112 STL r26,26*SZL(r3)
113 STL r27,27*SZL(r3)
114 STL r28,28*SZL(r3)
115 STL r29,29*SZL(r3)
116 STL r30,30*SZL(r3)
117 STL r31,31*SZL(r3)
118 /* go up one stack frame for SP */
119 LDL r4,0(r1)
120 STL r4,1*SZL(r3)
121 /* get caller's LR */
122 LDL r0,LRSAVE(r4)
123 STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
124 STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
125 mfmsr r0
126 STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
127 mfctr r0
128 STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
129 mfxer r0
130 STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
131 mfcr r0
132 STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
133 li r0,0
134 STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
135 blr
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
new file mode 100644
index 000000000000..69b658c0f760
--- /dev/null
+++ b/arch/powerpc/xmon/start_32.c
@@ -0,0 +1,624 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 */
4#include <linux/config.h>
5#include <linux/string.h>
6#include <asm/machdep.h>
7#include <asm/io.h>
8#include <asm/page.h>
9#include <linux/adb.h>
10#include <linux/pmu.h>
11#include <linux/cuda.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/sysrq.h>
15#include <linux/bitops.h>
16#include <asm/xmon.h>
17#include <asm/prom.h>
18#include <asm/bootx.h>
19#include <asm/machdep.h>
20#include <asm/errno.h>
21#include <asm/pmac_feature.h>
22#include <asm/processor.h>
23#include <asm/delay.h>
24#include <asm/btext.h>
25
26static volatile unsigned char __iomem *sccc, *sccd;
27unsigned int TXRDY, RXRDY, DLAB;
28static int xmon_expect(const char *str, unsigned int timeout);
29
30static int use_serial;
31static int use_screen;
32static int via_modem;
33static int xmon_use_sccb;
34static struct device_node *channel_node;
35
36#define TB_SPEED 25000000
37
38static inline unsigned int readtb(void)
39{
40 unsigned int ret;
41
42 asm volatile("mftb %0" : "=r" (ret) :);
43 return ret;
44}
45
46void buf_access(void)
47{
48 if (DLAB)
49 sccd[3] &= ~DLAB; /* reset DLAB */
50}
51
52extern int adb_init(void);
53
54#ifdef CONFIG_PPC_CHRP
55/*
56 * This looks in the "ranges" property for the primary PCI host bridge
57 * to find the physical address of the start of PCI/ISA I/O space.
58 * It is basically a cut-down version of pci_process_bridge_OF_ranges.
59 */
60static unsigned long chrp_find_phys_io_base(void)
61{
62 struct device_node *node;
63 unsigned int *ranges;
64 unsigned long base = CHRP_ISA_IO_BASE;
65 int rlen = 0;
66 int np;
67
68 node = find_devices("isa");
69 if (node != NULL) {
70 node = node->parent;
71 if (node == NULL || node->type == NULL
72 || strcmp(node->type, "pci") != 0)
73 node = NULL;
74 }
75 if (node == NULL)
76 node = find_devices("pci");
77 if (node == NULL)
78 return base;
79
80 ranges = (unsigned int *) get_property(node, "ranges", &rlen);
81 np = prom_n_addr_cells(node) + 5;
82 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
83 if ((ranges[0] >> 24) == 1 && ranges[2] == 0) {
84 /* I/O space starting at 0, grab the phys base */
85 base = ranges[np - 3];
86 break;
87 }
88 ranges += np;
89 }
90 return base;
91}
92#endif /* CONFIG_PPC_CHRP */
93
94#ifdef CONFIG_MAGIC_SYSRQ
95static void sysrq_handle_xmon(int key, struct pt_regs *regs,
96 struct tty_struct *tty)
97{
98 xmon(regs);
99}
100
101static struct sysrq_key_op sysrq_xmon_op =
102{
103 .handler = sysrq_handle_xmon,
104 .help_msg = "Xmon",
105 .action_msg = "Entering xmon",
106};
107#endif
108
109void
110xmon_map_scc(void)
111{
112#ifdef CONFIG_PPC_MULTIPLATFORM
113 volatile unsigned char __iomem *base;
114
115 if (_machine == _MACH_Pmac) {
116 struct device_node *np;
117 unsigned long addr;
118#ifdef CONFIG_BOOTX_TEXT
119 if (!use_screen && !use_serial
120 && !machine_is_compatible("iMac")) {
121 /* see if there is a keyboard in the device tree
122 with a parent of type "adb" */
123 for (np = find_devices("keyboard"); np; np = np->next)
124 if (np->parent && np->parent->type
125 && strcmp(np->parent->type, "adb") == 0)
126 break;
127
128 /* needs to be hacked if xmon_printk is to be used
129 from within find_via_pmu() */
130#ifdef CONFIG_ADB_PMU
131 if (np != NULL && boot_text_mapped && find_via_pmu())
132 use_screen = 1;
133#endif
134#ifdef CONFIG_ADB_CUDA
135 if (np != NULL && boot_text_mapped && find_via_cuda())
136 use_screen = 1;
137#endif
138 }
139 if (!use_screen && (np = find_devices("escc")) != NULL) {
140 /*
141 * look for the device node for the serial port
142 * we're using and see if it says it has a modem
143 */
144 char *name = xmon_use_sccb? "ch-b": "ch-a";
145 char *slots;
146 int l;
147
148 np = np->child;
149 while (np != NULL && strcmp(np->name, name) != 0)
150 np = np->sibling;
151 if (np != NULL) {
152 /* XXX should parse this properly */
153 channel_node = np;
154 slots = get_property(np, "slot-names", &l);
155 if (slots != NULL && l >= 10
156 && strcmp(slots+4, "Modem") == 0)
157 via_modem = 1;
158 }
159 }
160 btext_drawstring("xmon uses ");
161 if (use_screen)
162 btext_drawstring("screen and keyboard\n");
163 else {
164 if (via_modem)
165 btext_drawstring("modem on ");
166 btext_drawstring(xmon_use_sccb? "printer": "modem");
167 btext_drawstring(" port\n");
168 }
169
170#endif /* CONFIG_BOOTX_TEXT */
171
172#ifdef CHRP_ESCC
173 addr = 0xc1013020;
174#else
175 addr = 0xf3013020;
176#endif
177 TXRDY = 4;
178 RXRDY = 1;
179
180 np = find_devices("mac-io");
181 if (np && np->n_addrs)
182 addr = np->addrs[0].address + 0x13020;
183 base = (volatile unsigned char *) ioremap(addr & PAGE_MASK, PAGE_SIZE);
184 sccc = base + (addr & ~PAGE_MASK);
185 sccd = sccc + 0x10;
186
187 } else {
188 base = (volatile unsigned char *) isa_io_base;
189
190#ifdef CONFIG_PPC_CHRP
191 if (_machine == _MACH_chrp)
192 base = (volatile unsigned char __iomem *)
193 ioremap(chrp_find_phys_io_base(), 0x1000);
194#endif
195
196 sccc = base + 0x3fd;
197 sccd = base + 0x3f8;
198 if (xmon_use_sccb) {
199 sccc -= 0x100;
200 sccd -= 0x100;
201 }
202 TXRDY = 0x20;
203 RXRDY = 1;
204 DLAB = 0x80;
205 }
206#elif defined(CONFIG_GEMINI)
207 /* should already be mapped by the kernel boot */
208 sccc = (volatile unsigned char __iomem *) 0xffeffb0d;
209 sccd = (volatile unsigned char __iomem *) 0xffeffb08;
210 TXRDY = 0x20;
211 RXRDY = 1;
212 DLAB = 0x80;
213#elif defined(CONFIG_405GP)
214 sccc = (volatile unsigned char __iomem *)0xef600305;
215 sccd = (volatile unsigned char __iomem *)0xef600300;
216 TXRDY = 0x20;
217 RXRDY = 1;
218 DLAB = 0x80;
219#endif /* platform */
220
221 register_sysrq_key('x', &sysrq_xmon_op);
222}
223
224static int scc_initialized = 0;
225
226void xmon_init_scc(void);
227extern void cuda_poll(void);
228
229static inline void do_poll_adb(void)
230{
231#ifdef CONFIG_ADB_PMU
232 if (sys_ctrler == SYS_CTRLER_PMU)
233 pmu_poll_adb();
234#endif /* CONFIG_ADB_PMU */
235#ifdef CONFIG_ADB_CUDA
236 if (sys_ctrler == SYS_CTRLER_CUDA)
237 cuda_poll();
238#endif /* CONFIG_ADB_CUDA */
239}
240
241int
242xmon_write(void *handle, void *ptr, int nb)
243{
244 char *p = ptr;
245 int i, c, ct;
246
247#ifdef CONFIG_SMP
248 static unsigned long xmon_write_lock;
249 int lock_wait = 1000000;
250 int locked;
251
252 while ((locked = test_and_set_bit(0, &xmon_write_lock)) != 0)
253 if (--lock_wait == 0)
254 break;
255#endif
256
257#ifdef CONFIG_BOOTX_TEXT
258 if (use_screen) {
259 /* write it on the screen */
260 for (i = 0; i < nb; ++i)
261 btext_drawchar(*p++);
262 goto out;
263 }
264#endif
265 if (!scc_initialized)
266 xmon_init_scc();
267 ct = 0;
268 for (i = 0; i < nb; ++i) {
269 while ((*sccc & TXRDY) == 0)
270 do_poll_adb();
271 c = p[i];
272 if (c == '\n' && !ct) {
273 c = '\r';
274 ct = 1;
275 --i;
276 } else {
277 ct = 0;
278 }
279 buf_access();
280 *sccd = c;
281 eieio();
282 }
283
284 out:
285#ifdef CONFIG_SMP
286 if (!locked)
287 clear_bit(0, &xmon_write_lock);
288#endif
289 return nb;
290}
291
292int xmon_wants_key;
293int xmon_adb_keycode;
294
295#ifdef CONFIG_BOOTX_TEXT
296static int xmon_adb_shiftstate;
297
298static unsigned char xmon_keytab[128] =
299 "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
300 "yt123465=97-80]o" /* 0x10 - 0x1f */
301 "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
302 "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
303 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
304 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
305
306static unsigned char xmon_shift_keytab[128] =
307 "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
308 "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
309 "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
310 "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
311 "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
312 "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
313
314static int
315xmon_get_adb_key(void)
316{
317 int k, t, on;
318
319 xmon_wants_key = 1;
320 for (;;) {
321 xmon_adb_keycode = -1;
322 t = 0;
323 on = 0;
324 do {
325 if (--t < 0) {
326 on = 1 - on;
327 btext_drawchar(on? 0xdb: 0x20);
328 btext_drawchar('\b');
329 t = 200000;
330 }
331 do_poll_adb();
332 } while (xmon_adb_keycode == -1);
333 k = xmon_adb_keycode;
334 if (on)
335 btext_drawstring(" \b");
336
337 /* test for shift keys */
338 if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
339 xmon_adb_shiftstate = (k & 0x80) == 0;
340 continue;
341 }
342 if (k >= 0x80)
343 continue; /* ignore up transitions */
344 k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
345 if (k != 0)
346 break;
347 }
348 xmon_wants_key = 0;
349 return k;
350}
351#endif /* CONFIG_BOOTX_TEXT */
352
353int
354xmon_read(void *handle, void *ptr, int nb)
355{
356 char *p = ptr;
357 int i;
358
359#ifdef CONFIG_BOOTX_TEXT
360 if (use_screen) {
361 for (i = 0; i < nb; ++i)
362 *p++ = xmon_get_adb_key();
363 return i;
364 }
365#endif
366 if (!scc_initialized)
367 xmon_init_scc();
368 for (i = 0; i < nb; ++i) {
369 while ((*sccc & RXRDY) == 0)
370 do_poll_adb();
371 buf_access();
372 *p++ = *sccd;
373 }
374 return i;
375}
376
377int
378xmon_read_poll(void)
379{
380 if ((*sccc & RXRDY) == 0) {
381 do_poll_adb();
382 return -1;
383 }
384 buf_access();
385 return *sccd;
386}
387
388static unsigned char scc_inittab[] = {
389 13, 0, /* set baud rate divisor */
390 12, 1,
391 14, 1, /* baud rate gen enable, src=rtxc */
392 11, 0x50, /* clocks = br gen */
393 5, 0xea, /* tx 8 bits, assert DTR & RTS */
394 4, 0x46, /* x16 clock, 1 stop */
395 3, 0xc1, /* rx enable, 8 bits */
396};
397
398void
399xmon_init_scc(void)
400{
401 if ( _machine == _MACH_chrp )
402 {
403 sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */
404 sccd[0] = 12; eieio(); /* DLL = 9600 baud */
405 sccd[1] = 0; eieio();
406 sccd[2] = 0; eieio(); /* FCR = 0 */
407 sccd[3] = 3; eieio(); /* LCR = 8N1 */
408 sccd[1] = 0; eieio(); /* IER = 0 */
409 }
410 else if ( _machine == _MACH_Pmac )
411 {
412 int i, x;
413
414 if (channel_node != 0)
415 pmac_call_feature(
416 PMAC_FTR_SCC_ENABLE,
417 channel_node,
418 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
419 printk(KERN_INFO "Serial port locked ON by debugger !\n");
420 if (via_modem && channel_node != 0) {
421 unsigned int t0;
422
423 pmac_call_feature(
424 PMAC_FTR_MODEM_ENABLE,
425 channel_node, 0, 1);
426 printk(KERN_INFO "Modem powered up by debugger !\n");
427 t0 = readtb();
428 while (readtb() - t0 < 3*TB_SPEED)
429 eieio();
430 }
431 /* use the B channel if requested */
432 if (xmon_use_sccb) {
433 sccc = (volatile unsigned char *)
434 ((unsigned long)sccc & ~0x20);
435 sccd = sccc + 0x10;
436 }
437 for (i = 20000; i != 0; --i) {
438 x = *sccc; eieio();
439 }
440 *sccc = 9; eieio(); /* reset A or B side */
441 *sccc = ((unsigned long)sccc & 0x20)? 0x80: 0x40; eieio();
442 for (i = 0; i < sizeof(scc_inittab); ++i) {
443 *sccc = scc_inittab[i];
444 eieio();
445 }
446 }
447 scc_initialized = 1;
448 if (via_modem) {
449 for (;;) {
450 xmon_write(NULL, "ATE1V1\r", 7);
451 if (xmon_expect("OK", 5)) {
452 xmon_write(NULL, "ATA\r", 4);
453 if (xmon_expect("CONNECT", 40))
454 break;
455 }
456 xmon_write(NULL, "+++", 3);
457 xmon_expect("OK", 3);
458 }
459 }
460}
461
462void *xmon_stdin;
463void *xmon_stdout;
464void *xmon_stderr;
465
466int xmon_putc(int c, void *f)
467{
468 char ch = c;
469
470 if (c == '\n')
471 xmon_putc('\r', f);
472 return xmon_write(f, &ch, 1) == 1? c: -1;
473}
474
475int xmon_putchar(int c)
476{
477 return xmon_putc(c, xmon_stdout);
478}
479
480int xmon_fputs(char *str, void *f)
481{
482 int n = strlen(str);
483
484 return xmon_write(f, str, n) == n? 0: -1;
485}
486
487int
488xmon_readchar(void)
489{
490 char ch;
491
492 for (;;) {
493 switch (xmon_read(xmon_stdin, &ch, 1)) {
494 case 1:
495 return ch;
496 case -1:
497 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
498 return -1;
499 }
500 }
501}
502
503static char line[256];
504static char *lineptr;
505static int lineleft;
506
507int xmon_expect(const char *str, unsigned int timeout)
508{
509 int c;
510 unsigned int t0;
511
512 timeout *= TB_SPEED;
513 t0 = readtb();
514 do {
515 lineptr = line;
516 for (;;) {
517 c = xmon_read_poll();
518 if (c == -1) {
519 if (readtb() - t0 > timeout)
520 return 0;
521 continue;
522 }
523 if (c == '\n')
524 break;
525 if (c != '\r' && lineptr < &line[sizeof(line) - 1])
526 *lineptr++ = c;
527 }
528 *lineptr = 0;
529 } while (strstr(line, str) == NULL);
530 return 1;
531}
532
533int
534xmon_getchar(void)
535{
536 int c;
537
538 if (lineleft == 0) {
539 lineptr = line;
540 for (;;) {
541 c = xmon_readchar();
542 if (c == -1 || c == 4)
543 break;
544 if (c == '\r' || c == '\n') {
545 *lineptr++ = '\n';
546 xmon_putchar('\n');
547 break;
548 }
549 switch (c) {
550 case 0177:
551 case '\b':
552 if (lineptr > line) {
553 xmon_putchar('\b');
554 xmon_putchar(' ');
555 xmon_putchar('\b');
556 --lineptr;
557 }
558 break;
559 case 'U' & 0x1F:
560 while (lineptr > line) {
561 xmon_putchar('\b');
562 xmon_putchar(' ');
563 xmon_putchar('\b');
564 --lineptr;
565 }
566 break;
567 default:
568 if (lineptr >= &line[sizeof(line) - 1])
569 xmon_putchar('\a');
570 else {
571 xmon_putchar(c);
572 *lineptr++ = c;
573 }
574 }
575 }
576 lineleft = lineptr - line;
577 lineptr = line;
578 }
579 if (lineleft == 0)
580 return -1;
581 --lineleft;
582 return *lineptr++;
583}
584
585char *
586xmon_fgets(char *str, int nb, void *f)
587{
588 char *p;
589 int c;
590
591 for (p = str; p < str + nb - 1; ) {
592 c = xmon_getchar();
593 if (c == -1) {
594 if (p == str)
595 return NULL;
596 break;
597 }
598 *p++ = c;
599 if (c == '\n')
600 break;
601 }
602 *p = 0;
603 return str;
604}
605
606void
607xmon_enter(void)
608{
609#ifdef CONFIG_ADB_PMU
610 if (_machine == _MACH_Pmac) {
611 pmu_suspend();
612 }
613#endif
614}
615
616void
617xmon_leave(void)
618{
619#ifdef CONFIG_ADB_PMU
620 if (_machine == _MACH_Pmac) {
621 pmu_resume();
622 }
623#endif
624}
diff --git a/arch/ppc64/xmon/start.c b/arch/powerpc/xmon/start_64.c
index e50c158191e1..e50c158191e1 100644
--- a/arch/ppc64/xmon/start.c
+++ b/arch/powerpc/xmon/start_64.c
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
new file mode 100644
index 000000000000..a48bd594cf61
--- /dev/null
+++ b/arch/powerpc/xmon/start_8xx.c
@@ -0,0 +1,287 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 * Copyright (C) 2000 Dan Malek.
4 * Quick hack of Paul's code to make XMON work on 8xx processors. Lots
5 * of assumptions, like the SMC1 is used, it has been initialized by the
6 * loader at some point, and we can just stuff and suck bytes.
7 * We rely upon the 8xx uart driver to support us, as the interface
8 * changes between boot up and operational phases of the kernel.
9 */
10#include <linux/string.h>
11#include <asm/machdep.h>
12#include <asm/io.h>
13#include <asm/page.h>
14#include <linux/kernel.h>
15#include <asm/8xx_immap.h>
16#include <asm/mpc8xx.h>
17#include <asm/commproc.h>
18
19extern void xmon_printf(const char *fmt, ...);
20extern int xmon_8xx_write(char *str, int nb);
21extern int xmon_8xx_read_poll(void);
22extern int xmon_8xx_read_char(void);
23void prom_drawhex(uint);
24void prom_drawstring(const char *str);
25
26static int use_screen = 1; /* default */
27
28#define TB_SPEED 25000000
29
/* Read the low word of the PowerPC timebase register (mftb). */
static inline unsigned int readtb(void)
{
	unsigned int ret;

	asm volatile("mftb %0" : "=r" (ret) :);
	return ret;
}
37
/* Empty stub; presumably kept for interface parity with other xmon
 * ports — nothing to do on 8xx. */
void buf_access(void)
{
}
41
/*
 * Set up console access for xmon: point cpmp at the CPM region of the
 * 8xx internal memory map and force serial-port (non-screen) output.
 */
void
xmon_map_scc(void)
{

	cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
	use_screen = 0;

	prom_drawstring("xmon uses serial port\n");
}
51
52static int scc_initialized = 0;
53
54void xmon_init_scc(void);
55
56int
57xmon_write(void *handle, void *ptr, int nb)
58{
59 char *p = ptr;
60 int i, c, ct;
61
62 if (!scc_initialized)
63 xmon_init_scc();
64
65 return(xmon_8xx_write(ptr, nb));
66}
67
68int xmon_wants_key;
69
70int
71xmon_read(void *handle, void *ptr, int nb)
72{
73 char *p = ptr;
74 int i;
75
76 if (!scc_initialized)
77 xmon_init_scc();
78
79 for (i = 0; i < nb; ++i) {
80 *p++ = xmon_8xx_read_char();
81 }
82 return i;
83}
84
/* Non-blocking poll for console input; forwards to the 8xx driver. */
int
xmon_read_poll(void)
{
	return xmon_8xx_read_poll();
}
90
91void
92xmon_init_scc()
93{
94 scc_initialized = 1;
95}
96
#if 0	/* Dead code: Open Firmware "exit" client call, disabled here. */
extern int (*prom_entry)(void *);

/* Loop invoking the firmware "exit" service; never returns. */
int
xmon_exit(void)
{
	struct prom_args {
		char *service;
	} args;

	for (;;) {
		args.service = "exit";
		(*prom_entry)(&args);
	}
}
#endif
113
114void *xmon_stdin;
115void *xmon_stdout;
116void *xmon_stderr;
117
/*
 * Nothing to do at init time on 8xx: the console is initialized
 * lazily by xmon_read()/xmon_write() on first use.
 */
void
xmon_init(void)
{
}
122
/*
 * Write one character to stream f, expanding '\n' to "\r\n".
 * Returns the character written, or -1 on a short write.
 */
int
xmon_putc(int c, void *f)
{
	char ch = c;

	if (c == '\n')
		xmon_putc('\r', f);
	if (xmon_write(f, &ch, 1) == 1)
		return c;
	return -1;
}
132
/* putchar() analogue: write one character to xmon's stdout stream. */
int
xmon_putchar(int c)
{
	return xmon_putc(c, xmon_stdout);
}
138
/*
 * Write a NUL-terminated string to stream f.
 * Returns 0 on success, -1 if the write came up short.
 */
int
xmon_fputs(char *str, void *f)
{
	int len = strlen(str);

	if (xmon_write(f, str, len) == len)
		return 0;
	return -1;
}
146
147int
148xmon_readchar(void)
149{
150 char ch;
151
152 for (;;) {
153 switch (xmon_read(xmon_stdin, &ch, 1)) {
154 case 1:
155 return ch;
156 case -1:
157 xmon_printf("read(stdin) returned -1\r\n", 0, 0);
158 return -1;
159 }
160 }
161}
162
163static char line[256];
164static char *lineptr;
165static int lineleft;
166
#if 0	/* Dead code: wait (with timeout) for a line containing str. */
/*
 * Poll console input until a full line containing str arrives, or
 * until timeout (in seconds, converted to timebase ticks) elapses.
 * Returns 1 on match, 0 on timeout.  CRs are dropped; lines longer
 * than the buffer are silently truncated.
 */
int xmon_expect(const char *str, unsigned int timeout)
{
	int c;
	unsigned int t0;

	timeout *= TB_SPEED;	/* seconds -> timebase ticks */
	t0 = readtb();
	do {
		lineptr = line;
		for (;;) {
			c = xmon_read_poll();
			if (c == -1) {
				if (readtb() - t0 > timeout)
					return 0;	/* timed out */
				continue;
			}
			if (c == '\n')
				break;
			if (c != '\r' && lineptr < &line[sizeof(line) - 1])
				*lineptr++ = c;
		}
		*lineptr = 0;
	} while (strstr(line, str) == NULL);
	return 1;
}
#endif
194
/*
 * Return the next character of buffered line input.
 *
 * When the buffer is empty, read a whole line from the console with
 * rudimentary line editing: DEL/backspace erase one character, ^U kills
 * the whole line, CR/LF terminate it (stored and echoed as '\n'), and
 * EOF (-1) or ^D stop reading.  Typed characters are echoed; input
 * beyond the buffer rings the bell ('\a').
 *
 * Returns the next buffered character, or -1 when nothing was read.
 * Uses the file-scope line[]/lineptr/lineleft state, so not reentrant.
 */
int
xmon_getchar(void)
{
	int c;

	if (lineleft == 0) {
		/* Buffer exhausted: read and edit a fresh line. */
		lineptr = line;
		for (;;) {
			c = xmon_readchar();
			if (c == -1 || c == 4)		/* EOF or ^D */
				break;
			if (c == '\r' || c == '\n') {
				*lineptr++ = '\n';
				xmon_putchar('\n');
				break;
			}
			switch (c) {
			case 0177:			/* DEL */
			case '\b':
				if (lineptr > line) {
					/* Erase one echoed character. */
					xmon_putchar('\b');
					xmon_putchar(' ');
					xmon_putchar('\b');
					--lineptr;
				}
				break;
			case 'U' & 0x1F:		/* ^U: kill the line */
				while (lineptr > line) {
					xmon_putchar('\b');
					xmon_putchar(' ');
					xmon_putchar('\b');
					--lineptr;
				}
				break;
			default:
				if (lineptr >= &line[sizeof(line) - 1])
					xmon_putchar('\a');	/* buffer full */
				else {
					xmon_putchar(c);
					*lineptr++ = c;
				}
			}
		}
		lineleft = lineptr - line;
		lineptr = line;
	}
	if (lineleft == 0)
		return -1;
	--lineleft;
	return *lineptr++;
}
246
247char *
248xmon_fgets(char *str, int nb, void *f)
249{
250 char *p;
251 int c;
252
253 for (p = str; p < str + nb - 1; ) {
254 c = xmon_getchar();
255 if (c == -1) {
256 if (p == str)
257 return 0;
258 break;
259 }
260 *p++ = c;
261 if (c == '\n')
262 break;
263 }
264 *p = 0;
265 return str;
266}
267
268void
269prom_drawhex(uint val)
270{
271 unsigned char buf[10];
272
273 int i;
274 for (i = 7; i >= 0; i--)
275 {
276 buf[i] = "0123456789abcdef"[val & 0x0f];
277 val >>= 4;
278 }
279 buf[8] = '\0';
280 xmon_fputs(buf, xmon_stdout);
281}
282
283void
284prom_drawstring(const char *str)
285{
286 xmon_fputs(str, xmon_stdout);
287}
diff --git a/arch/ppc64/xmon/subr_prf.c b/arch/powerpc/xmon/subr_prf.c
index 5242bd7d0959..b48738c6dd33 100644
--- a/arch/ppc64/xmon/subr_prf.c
+++ b/arch/powerpc/xmon/subr_prf.c
@@ -18,13 +18,13 @@
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/module.h>
21#include <stdarg.h> 22#include <stdarg.h>
22#include "nonstdio.h" 23#include "nonstdio.h"
23 24
24extern int xmon_write(void *, void *, int); 25extern int xmon_write(void *, void *, int);
25 26
26void 27void xmon_vfprintf(void *f, const char *fmt, va_list ap)
27xmon_vfprintf(void *f, const char *fmt, va_list ap)
28{ 28{
29 static char xmon_buf[2048]; 29 static char xmon_buf[2048];
30 int n; 30 int n;
@@ -33,8 +33,7 @@ xmon_vfprintf(void *f, const char *fmt, va_list ap)
33 xmon_write(f, xmon_buf, n); 33 xmon_write(f, xmon_buf, n);
34} 34}
35 35
36void 36void xmon_printf(const char *fmt, ...)
37xmon_printf(const char *fmt, ...)
38{ 37{
39 va_list ap; 38 va_list ap;
40 39
@@ -42,9 +41,9 @@ xmon_printf(const char *fmt, ...)
42 xmon_vfprintf(stdout, fmt, ap); 41 xmon_vfprintf(stdout, fmt, ap);
43 va_end(ap); 42 va_end(ap);
44} 43}
44EXPORT_SYMBOL(xmon_printf);
45 45
46void 46void xmon_fprintf(void *f, const char *fmt, ...)
47xmon_fprintf(void *f, const char *fmt, ...)
48{ 47{
49 va_list ap; 48 va_list ap;
50 49
diff --git a/arch/ppc64/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 74e63a886a69..1124f1146202 100644
--- a/arch/ppc64/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,25 +17,31 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h>
20 21
21#include <asm/ptrace.h> 22#include <asm/ptrace.h>
22#include <asm/string.h> 23#include <asm/string.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/machdep.h> 25#include <asm/machdep.h>
26#include <asm/xmon.h>
27#ifdef CONFIG_PMAC_BACKLIGHT
28#include <asm/backlight.h>
29#endif
25#include <asm/processor.h> 30#include <asm/processor.h>
26#include <asm/pgtable.h> 31#include <asm/pgtable.h>
27#include <asm/mmu.h> 32#include <asm/mmu.h>
28#include <asm/mmu_context.h> 33#include <asm/mmu_context.h>
29#include <asm/paca.h>
30#include <asm/ppcdebug.h>
31#include <asm/cputable.h> 34#include <asm/cputable.h>
32#include <asm/rtas.h> 35#include <asm/rtas.h>
33#include <asm/sstep.h> 36#include <asm/sstep.h>
34#include <asm/bug.h> 37#include <asm/bug.h>
38
39#ifdef CONFIG_PPC64
35#include <asm/hvcall.h> 40#include <asm/hvcall.h>
41#include <asm/paca.h>
42#endif
36 43
37#include "nonstdio.h" 44#include "nonstdio.h"
38#include "privinst.h"
39 45
40#define scanhex xmon_scanhex 46#define scanhex xmon_scanhex
41#define skipbl xmon_skipbl 47#define skipbl xmon_skipbl
@@ -58,7 +64,7 @@ static unsigned long ncsum = 4096;
58static int termch; 64static int termch;
59static char tmpstr[128]; 65static char tmpstr[128];
60 66
61#define JMP_BUF_LEN (184/sizeof(long)) 67#define JMP_BUF_LEN 23
62static long bus_error_jmp[JMP_BUF_LEN]; 68static long bus_error_jmp[JMP_BUF_LEN];
63static int catch_memory_errors; 69static int catch_memory_errors;
64static long *xmon_fault_jmp[NR_CPUS]; 70static long *xmon_fault_jmp[NR_CPUS];
@@ -130,23 +136,36 @@ static void cacheflush(void);
130static int cpu_cmd(void); 136static int cpu_cmd(void);
131static void csum(void); 137static void csum(void);
132static void bootcmds(void); 138static void bootcmds(void);
139static void proccall(void);
133void dump_segments(void); 140void dump_segments(void);
134static void symbol_lookup(void); 141static void symbol_lookup(void);
135static void xmon_print_symbol(unsigned long address, const char *mid, 142static void xmon_print_symbol(unsigned long address, const char *mid,
136 const char *after); 143 const char *after);
137static const char *getvecname(unsigned long vec); 144static const char *getvecname(unsigned long vec);
138 145
139static void debug_trace(void);
140
141extern int print_insn_powerpc(unsigned long, unsigned long, int); 146extern int print_insn_powerpc(unsigned long, unsigned long, int);
142extern void printf(const char *fmt, ...); 147extern void printf(const char *fmt, ...);
143extern void xmon_vfprintf(void *f, const char *fmt, va_list ap); 148extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
144extern int xmon_putc(int c, void *f); 149extern int xmon_putc(int c, void *f);
145extern int putchar(int ch); 150extern int putchar(int ch);
151
152extern void xmon_enter(void);
153extern void xmon_leave(void);
154
146extern int xmon_read_poll(void); 155extern int xmon_read_poll(void);
147extern int setjmp(long *); 156extern long setjmp(long *);
148extern void longjmp(long *, int); 157extern void longjmp(long *, long);
149extern unsigned long _ASR; 158extern void xmon_save_regs(struct pt_regs *);
159
160#ifdef CONFIG_PPC64
161#define REG "%.16lx"
162#define REGS_PER_LINE 4
163#define LAST_VOLATILE 13
164#else
165#define REG "%.8lx"
166#define REGS_PER_LINE 8
167#define LAST_VOLATILE 12
168#endif
150 169
151#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3]) 170#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
152 171
@@ -186,47 +205,45 @@ Commands:\n\
186 ml locate a block of memory\n\ 205 ml locate a block of memory\n\
187 mz zero a block of memory\n\ 206 mz zero a block of memory\n\
188 mi show information about memory allocation\n\ 207 mi show information about memory allocation\n\
189 p show the task list\n\ 208 p call a procedure\n\
190 r print registers\n\ 209 r print registers\n\
191 s single step\n\ 210 s single step\n\
192 S print special registers\n\ 211 S print special registers\n\
193 t print backtrace\n\ 212 t print backtrace\n\
194 T Enable/Disable PPCDBG flags\n\
195 x exit monitor and recover\n\ 213 x exit monitor and recover\n\
196 X exit monitor and dont recover\n\ 214 X exit monitor and dont recover\n"
197 u dump segment table or SLB\n\ 215#ifdef CONFIG_PPC64
198 ? help\n" 216" u dump segment table or SLB\n"
199 "\ 217#endif
200 zr reboot\n\ 218#ifdef CONFIG_PPC_STD_MMU_32
219" u dump segment registers\n"
220#endif
221" ? help\n"
222" zr reboot\n\
201 zh halt\n" 223 zh halt\n"
202; 224;
203 225
204static struct pt_regs *xmon_regs; 226static struct pt_regs *xmon_regs;
205 227
206extern inline void sync(void) 228static inline void sync(void)
207{ 229{
208 asm volatile("sync; isync"); 230 asm volatile("sync; isync");
209} 231}
210 232
211/* (Ref: 64-bit PowerPC ELF ABI Spplement; Ian Lance Taylor, Zembu Labs). 233static inline void store_inst(void *p)
212 A PPC stack frame looks like this: 234{
213 235 asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
214 High Address 236}
215 Back Chain 237
216 FP reg save area 238static inline void cflush(void *p)
217 GP reg save area 239{
218 Local var space 240 asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
219 Parameter save area (SP+48) 241}
220 TOC save area (SP+40) 242
221 link editor doubleword (SP+32) 243static inline void cinval(void *p)
222 compiler doubleword (SP+24) 244{
223 LR save (SP+16) 245 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
224 CR save (SP+8) 246}
225 Back Chain (SP+0)
226
227 Note that the LR (ret addr) may not be saved in the current frame if
228 no functions have been called from the current function.
229 */
230 247
231/* 248/*
232 * Disable surveillance (the service processor watchdog function) 249 * Disable surveillance (the service processor watchdog function)
@@ -310,8 +327,8 @@ int xmon_core(struct pt_regs *regs, int fromipi)
310 unsigned long timeout; 327 unsigned long timeout;
311#endif 328#endif
312 329
313 msr = get_msr(); 330 msr = mfmsr();
314 set_msrd(msr & ~MSR_EE); /* disable interrupts */ 331 mtmsr(msr & ~MSR_EE); /* disable interrupts */
315 332
316 bp = in_breakpoint_table(regs->nip, &offset); 333 bp = in_breakpoint_table(regs->nip, &offset);
317 if (bp != NULL) { 334 if (bp != NULL) {
@@ -487,7 +504,7 @@ int xmon_core(struct pt_regs *regs, int fromipi)
487 504
488 insert_cpu_bpts(); 505 insert_cpu_bpts();
489 506
490 set_msrd(msr); /* restore interrupt enable */ 507 mtmsr(msr); /* restore interrupt enable */
491 508
492 return cmd != 'X'; 509 return cmd != 'X';
493} 510}
@@ -497,56 +514,23 @@ int xmon(struct pt_regs *excp)
497 struct pt_regs regs; 514 struct pt_regs regs;
498 515
499 if (excp == NULL) { 516 if (excp == NULL) {
500 /* Ok, grab regs as they are now. 517 xmon_save_regs(&regs);
501 This won't do a particularily good job because the
502 prologue has already been executed.
503 ToDo: We could reach back into the callers save
504 area to do a better job of representing the
505 caller's state.
506 */
507 asm volatile ("std 0,0(%0)\n\
508 std 1,8(%0)\n\
509 std 2,16(%0)\n\
510 std 3,24(%0)\n\
511 std 4,32(%0)\n\
512 std 5,40(%0)\n\
513 std 6,48(%0)\n\
514 std 7,56(%0)\n\
515 std 8,64(%0)\n\
516 std 9,72(%0)\n\
517 std 10,80(%0)\n\
518 std 11,88(%0)\n\
519 std 12,96(%0)\n\
520 std 13,104(%0)\n\
521 std 14,112(%0)\n\
522 std 15,120(%0)\n\
523 std 16,128(%0)\n\
524 std 17,136(%0)\n\
525 std 18,144(%0)\n\
526 std 19,152(%0)\n\
527 std 20,160(%0)\n\
528 std 21,168(%0)\n\
529 std 22,176(%0)\n\
530 std 23,184(%0)\n\
531 std 24,192(%0)\n\
532 std 25,200(%0)\n\
533 std 26,208(%0)\n\
534 std 27,216(%0)\n\
535 std 28,224(%0)\n\
536 std 29,232(%0)\n\
537 std 30,240(%0)\n\
538 std 31,248(%0)" : : "b" (&regs));
539
540 regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
541 regs.msr = get_msr();
542 regs.ctr = get_ctr();
543 regs.xer = get_xer();
544 regs.ccr = get_cr();
545 regs.trap = 0;
546 excp = &regs; 518 excp = &regs;
547 } 519 }
548 return xmon_core(excp, 0); 520 return xmon_core(excp, 0);
549} 521}
522EXPORT_SYMBOL(xmon);
523
524irqreturn_t
525xmon_irq(int irq, void *d, struct pt_regs *regs)
526{
527 unsigned long flags;
528 local_irq_save(flags);
529 printf("Keyboard interrupt\n");
530 xmon(regs);
531 local_irq_restore(flags);
532 return IRQ_HANDLED;
533}
550 534
551int xmon_bpt(struct pt_regs *regs) 535int xmon_bpt(struct pt_regs *regs)
552{ 536{
@@ -718,7 +702,7 @@ static void insert_cpu_bpts(void)
718 if (dabr.enabled) 702 if (dabr.enabled)
719 set_dabr(dabr.address | (dabr.enabled & 7)); 703 set_dabr(dabr.address | (dabr.enabled & 7));
720 if (iabr && cpu_has_feature(CPU_FTR_IABR)) 704 if (iabr && cpu_has_feature(CPU_FTR_IABR))
721 set_iabr(iabr->address 705 mtspr(SPRN_IABR, iabr->address
722 | (iabr->enabled & (BP_IABR|BP_IABR_TE))); 706 | (iabr->enabled & (BP_IABR|BP_IABR_TE)));
723} 707}
724 708
@@ -746,7 +730,7 @@ static void remove_cpu_bpts(void)
746{ 730{
747 set_dabr(0); 731 set_dabr(0);
748 if (cpu_has_feature(CPU_FTR_IABR)) 732 if (cpu_has_feature(CPU_FTR_IABR))
749 set_iabr(0); 733 mtspr(SPRN_IABR, 0);
750} 734}
751 735
752/* Command interpreting routine */ 736/* Command interpreting routine */
@@ -830,9 +814,6 @@ cmds(struct pt_regs *excp)
830 case '?': 814 case '?':
831 printf(help_string); 815 printf(help_string);
832 break; 816 break;
833 case 'p':
834 show_state();
835 break;
836 case 'b': 817 case 'b':
837 bpt_cmds(); 818 bpt_cmds();
838 break; 819 break;
@@ -846,12 +827,14 @@ cmds(struct pt_regs *excp)
846 case 'z': 827 case 'z':
847 bootcmds(); 828 bootcmds();
848 break; 829 break;
849 case 'T': 830 case 'p':
850 debug_trace(); 831 proccall();
851 break; 832 break;
833#ifdef CONFIG_PPC_STD_MMU
852 case 'u': 834 case 'u':
853 dump_segments(); 835 dump_segments();
854 break; 836 break;
837#endif
855 default: 838 default:
856 printf("Unrecognized command: "); 839 printf("Unrecognized command: ");
857 do { 840 do {
@@ -1070,6 +1053,7 @@ bpt_cmds(void)
1070 1053
1071 cmd = inchar(); 1054 cmd = inchar();
1072 switch (cmd) { 1055 switch (cmd) {
1056#ifndef CONFIG_8xx
1073 case 'd': /* bd - hardware data breakpoint */ 1057 case 'd': /* bd - hardware data breakpoint */
1074 mode = 7; 1058 mode = 7;
1075 cmd = inchar(); 1059 cmd = inchar();
@@ -1111,6 +1095,7 @@ bpt_cmds(void)
1111 iabr = bp; 1095 iabr = bp;
1112 } 1096 }
1113 break; 1097 break;
1098#endif
1114 1099
1115 case 'c': 1100 case 'c':
1116 if (!scanhex(&a)) { 1101 if (!scanhex(&a)) {
@@ -1152,7 +1137,7 @@ bpt_cmds(void)
1152 /* print all breakpoints */ 1137 /* print all breakpoints */
1153 printf(" type address\n"); 1138 printf(" type address\n");
1154 if (dabr.enabled) { 1139 if (dabr.enabled) {
1155 printf(" data %.16lx [", dabr.address); 1140 printf(" data "REG" [", dabr.address);
1156 if (dabr.enabled & 1) 1141 if (dabr.enabled & 1)
1157 printf("r"); 1142 printf("r");
1158 if (dabr.enabled & 2) 1143 if (dabr.enabled & 2)
@@ -1231,6 +1216,18 @@ static void get_function_bounds(unsigned long pc, unsigned long *startp,
1231 1216
1232static int xmon_depth_to_print = 64; 1217static int xmon_depth_to_print = 64;
1233 1218
1219#ifdef CONFIG_PPC64
1220#define LRSAVE_OFFSET 0x10
1221#define REG_FRAME_MARKER 0x7265677368657265ul /* "regshere" */
1222#define MARKER_OFFSET 0x60
1223#define REGS_OFFSET 0x70
1224#else
1225#define LRSAVE_OFFSET 4
1226#define REG_FRAME_MARKER 0x72656773
1227#define MARKER_OFFSET 8
1228#define REGS_OFFSET 16
1229#endif
1230
1234static void xmon_show_stack(unsigned long sp, unsigned long lr, 1231static void xmon_show_stack(unsigned long sp, unsigned long lr,
1235 unsigned long pc) 1232 unsigned long pc)
1236{ 1233{
@@ -1247,7 +1244,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1247 break; 1244 break;
1248 } 1245 }
1249 1246
1250 if (!mread(sp + 16, &ip, sizeof(unsigned long)) 1247 if (!mread(sp + LRSAVE_OFFSET, &ip, sizeof(unsigned long))
1251 || !mread(sp, &newsp, sizeof(unsigned long))) { 1248 || !mread(sp, &newsp, sizeof(unsigned long))) {
1252 printf("Couldn't read stack frame at %lx\n", sp); 1249 printf("Couldn't read stack frame at %lx\n", sp);
1253 break; 1250 break;
@@ -1266,7 +1263,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1266 get_function_bounds(pc, &fnstart, &fnend); 1263 get_function_bounds(pc, &fnstart, &fnend);
1267 nextip = 0; 1264 nextip = 0;
1268 if (newsp > sp) 1265 if (newsp > sp)
1269 mread(newsp + 16, &nextip, 1266 mread(newsp + LRSAVE_OFFSET, &nextip,
1270 sizeof(unsigned long)); 1267 sizeof(unsigned long));
1271 if (lr == ip) { 1268 if (lr == ip) {
1272 if (lr < PAGE_OFFSET 1269 if (lr < PAGE_OFFSET
@@ -1280,24 +1277,24 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
1280 xmon_print_symbol(lr, " ", "\n"); 1277 xmon_print_symbol(lr, " ", "\n");
1281 } 1278 }
1282 if (printip) { 1279 if (printip) {
1283 printf("[%.16lx] ", sp); 1280 printf("["REG"] ", sp);
1284 xmon_print_symbol(ip, " ", " (unreliable)\n"); 1281 xmon_print_symbol(ip, " ", " (unreliable)\n");
1285 } 1282 }
1286 pc = lr = 0; 1283 pc = lr = 0;
1287 1284
1288 } else { 1285 } else {
1289 printf("[%.16lx] ", sp); 1286 printf("["REG"] ", sp);
1290 xmon_print_symbol(ip, " ", "\n"); 1287 xmon_print_symbol(ip, " ", "\n");
1291 } 1288 }
1292 1289
1293 /* Look for "regshere" marker to see if this is 1290 /* Look for "regshere" marker to see if this is
1294 an exception frame. */ 1291 an exception frame. */
1295 if (mread(sp + 0x60, &marker, sizeof(unsigned long)) 1292 if (mread(sp + MARKER_OFFSET, &marker, sizeof(unsigned long))
1296 && marker == 0x7265677368657265ul) { 1293 && marker == REG_FRAME_MARKER) {
1297 if (mread(sp + 0x70, &regs, sizeof(regs)) 1294 if (mread(sp + REGS_OFFSET, &regs, sizeof(regs))
1298 != sizeof(regs)) { 1295 != sizeof(regs)) {
1299 printf("Couldn't read registers at %lx\n", 1296 printf("Couldn't read registers at %lx\n",
1300 sp + 0x70); 1297 sp + REGS_OFFSET);
1301 break; 1298 break;
1302 } 1299 }
1303 printf("--- Exception: %lx %s at ", regs.trap, 1300 printf("--- Exception: %lx %s at ", regs.trap,
@@ -1371,7 +1368,9 @@ void excprint(struct pt_regs *fp)
1371 } 1368 }
1372 1369
1373 printf(" current = 0x%lx\n", current); 1370 printf(" current = 0x%lx\n", current);
1371#ifdef CONFIG_PPC64
1374 printf(" paca = 0x%lx\n", get_paca()); 1372 printf(" paca = 0x%lx\n", get_paca());
1373#endif
1375 if (current) { 1374 if (current) {
1376 printf(" pid = %ld, comm = %s\n", 1375 printf(" pid = %ld, comm = %s\n",
1377 current->pid, current->comm); 1376 current->pid, current->comm);
@@ -1383,7 +1382,7 @@ void excprint(struct pt_regs *fp)
1383 1382
1384void prregs(struct pt_regs *fp) 1383void prregs(struct pt_regs *fp)
1385{ 1384{
1386 int n; 1385 int n, trap;
1387 unsigned long base; 1386 unsigned long base;
1388 struct pt_regs regs; 1387 struct pt_regs regs;
1389 1388
@@ -1396,7 +1395,7 @@ void prregs(struct pt_regs *fp)
1396 __delay(200); 1395 __delay(200);
1397 } else { 1396 } else {
1398 catch_memory_errors = 0; 1397 catch_memory_errors = 0;
1399 printf("*** Error reading registers from %.16lx\n", 1398 printf("*** Error reading registers from "REG"\n",
1400 base); 1399 base);
1401 return; 1400 return;
1402 } 1401 }
@@ -1404,22 +1403,36 @@ void prregs(struct pt_regs *fp)
1404 fp = &regs; 1403 fp = &regs;
1405 } 1404 }
1406 1405
1406#ifdef CONFIG_PPC64
1407 if (FULL_REGS(fp)) { 1407 if (FULL_REGS(fp)) {
1408 for (n = 0; n < 16; ++n) 1408 for (n = 0; n < 16; ++n)
1409 printf("R%.2ld = %.16lx R%.2ld = %.16lx\n", 1409 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1410 n, fp->gpr[n], n+16, fp->gpr[n+16]); 1410 n, fp->gpr[n], n+16, fp->gpr[n+16]);
1411 } else { 1411 } else {
1412 for (n = 0; n < 7; ++n) 1412 for (n = 0; n < 7; ++n)
1413 printf("R%.2ld = %.16lx R%.2ld = %.16lx\n", 1413 printf("R%.2ld = "REG" R%.2ld = "REG"\n",
1414 n, fp->gpr[n], n+7, fp->gpr[n+7]); 1414 n, fp->gpr[n], n+7, fp->gpr[n+7]);
1415 } 1415 }
1416#else
1417 for (n = 0; n < 32; ++n) {
1418 printf("R%.2d = %.8x%s", n, fp->gpr[n],
1419 (n & 3) == 3? "\n": " ");
1420 if (n == 12 && !FULL_REGS(fp)) {
1421 printf("\n");
1422 break;
1423 }
1424 }
1425#endif
1416 printf("pc = "); 1426 printf("pc = ");
1417 xmon_print_symbol(fp->nip, " ", "\n"); 1427 xmon_print_symbol(fp->nip, " ", "\n");
1418 printf("lr = "); 1428 printf("lr = ");
1419 xmon_print_symbol(fp->link, " ", "\n"); 1429 xmon_print_symbol(fp->link, " ", "\n");
1420 printf("msr = %.16lx cr = %.8lx\n", fp->msr, fp->ccr); 1430 printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr);
1421 printf("ctr = %.16lx xer = %.16lx trap = %8lx\n", 1431 printf("ctr = "REG" xer = "REG" trap = %4lx\n",
1422 fp->ctr, fp->xer, fp->trap); 1432 fp->ctr, fp->xer, fp->trap);
1433 trap = TRAP(fp);
1434 if (trap == 0x300 || trap == 0x380 || trap == 0x600)
1435 printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
1423} 1436}
1424 1437
1425void cacheflush(void) 1438void cacheflush(void)
@@ -1519,8 +1532,7 @@ static unsigned long regno;
1519extern char exc_prolog; 1532extern char exc_prolog;
1520extern char dec_exc; 1533extern char dec_exc;
1521 1534
1522void 1535void super_regs(void)
1523super_regs(void)
1524{ 1536{
1525 int cmd; 1537 int cmd;
1526 unsigned long val; 1538 unsigned long val;
@@ -1536,12 +1548,14 @@ super_regs(void)
1536 asm("mr %0,1" : "=r" (sp) :); 1548 asm("mr %0,1" : "=r" (sp) :);
1537 asm("mr %0,2" : "=r" (toc) :); 1549 asm("mr %0,2" : "=r" (toc) :);
1538 1550
1539 printf("msr = %.16lx sprg0= %.16lx\n", get_msr(), get_sprg0()); 1551 printf("msr = "REG" sprg0= "REG"\n",
1540 printf("pvr = %.16lx sprg1= %.16lx\n", get_pvr(), get_sprg1()); 1552 mfmsr(), mfspr(SPRN_SPRG0));
1541 printf("dec = %.16lx sprg2= %.16lx\n", get_dec(), get_sprg2()); 1553 printf("pvr = "REG" sprg1= "REG"\n",
1542 printf("sp = %.16lx sprg3= %.16lx\n", sp, get_sprg3()); 1554 mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
1543 printf("toc = %.16lx dar = %.16lx\n", toc, get_dar()); 1555 printf("dec = "REG" sprg2= "REG"\n",
1544 printf("srr0 = %.16lx srr1 = %.16lx\n", get_srr0(), get_srr1()); 1556 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1557 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
1558 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
1545#ifdef CONFIG_PPC_ISERIES 1559#ifdef CONFIG_PPC_ISERIES
1546 // Dump out relevant Paca data areas. 1560 // Dump out relevant Paca data areas.
1547 printf("Paca: \n"); 1561 printf("Paca: \n");
@@ -1578,11 +1592,6 @@ super_regs(void)
1578 case 'r': 1592 case 'r':
1579 printf("spr %lx = %lx\n", regno, read_spr(regno)); 1593 printf("spr %lx = %lx\n", regno, read_spr(regno));
1580 break; 1594 break;
1581 case 'm':
1582 val = get_msr();
1583 scanhex(&val);
1584 set_msrd(val);
1585 break;
1586 } 1595 }
1587 scannl(); 1596 scannl();
1588} 1597}
@@ -1604,13 +1613,13 @@ mread(unsigned long adrs, void *buf, int size)
1604 q = (char *)buf; 1613 q = (char *)buf;
1605 switch (size) { 1614 switch (size) {
1606 case 2: 1615 case 2:
1607 *(short *)q = *(short *)p; 1616 *(u16 *)q = *(u16 *)p;
1608 break; 1617 break;
1609 case 4: 1618 case 4:
1610 *(int *)q = *(int *)p; 1619 *(u32 *)q = *(u32 *)p;
1611 break; 1620 break;
1612 case 8: 1621 case 8:
1613 *(long *)q = *(long *)p; 1622 *(u64 *)q = *(u64 *)p;
1614 break; 1623 break;
1615 default: 1624 default:
1616 for( ; n < size; ++n) { 1625 for( ; n < size; ++n) {
@@ -1641,13 +1650,13 @@ mwrite(unsigned long adrs, void *buf, int size)
1641 q = (char *) buf; 1650 q = (char *) buf;
1642 switch (size) { 1651 switch (size) {
1643 case 2: 1652 case 2:
1644 *(short *)p = *(short *)q; 1653 *(u16 *)p = *(u16 *)q;
1645 break; 1654 break;
1646 case 4: 1655 case 4:
1647 *(int *)p = *(int *)q; 1656 *(u32 *)p = *(u32 *)q;
1648 break; 1657 break;
1649 case 8: 1658 case 8:
1650 *(long *)p = *(long *)q; 1659 *(u64 *)p = *(u64 *)q;
1651 break; 1660 break;
1652 default: 1661 default:
1653 for ( ; n < size; ++n) { 1662 for ( ; n < size; ++n) {
@@ -1667,11 +1676,12 @@ mwrite(unsigned long adrs, void *buf, int size)
1667} 1676}
1668 1677
1669static int fault_type; 1678static int fault_type;
1679static int fault_except;
1670static char *fault_chars[] = { "--", "**", "##" }; 1680static char *fault_chars[] = { "--", "**", "##" };
1671 1681
1672static int 1682static int handle_fault(struct pt_regs *regs)
1673handle_fault(struct pt_regs *regs)
1674{ 1683{
1684 fault_except = TRAP(regs);
1675 switch (TRAP(regs)) { 1685 switch (TRAP(regs)) {
1676 case 0x200: 1686 case 0x200:
1677 fault_type = 0; 1687 fault_type = 0;
@@ -1960,7 +1970,7 @@ prdump(unsigned long adrs, long ndump)
1960 unsigned char temp[16]; 1970 unsigned char temp[16];
1961 1971
1962 for (n = ndump; n > 0;) { 1972 for (n = ndump; n > 0;) {
1963 printf("%.16lx", adrs); 1973 printf(REG, adrs);
1964 putchar(' '); 1974 putchar(' ');
1965 r = n < 16? n: 16; 1975 r = n < 16? n: 16;
1966 nr = mread(adrs, temp, r); 1976 nr = mread(adrs, temp, r);
@@ -2008,7 +2018,7 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
2008 if (nr == 0) { 2018 if (nr == 0) {
2009 if (praddr) { 2019 if (praddr) {
2010 const char *x = fault_chars[fault_type]; 2020 const char *x = fault_chars[fault_type];
2011 printf("%.16lx %s%s%s%s\n", adr, x, x, x, x); 2021 printf(REG" %s%s%s%s\n", adr, x, x, x, x);
2012 } 2022 }
2013 break; 2023 break;
2014 } 2024 }
@@ -2023,7 +2033,7 @@ ppc_inst_dump(unsigned long adr, long count, int praddr)
2023 dotted = 0; 2033 dotted = 0;
2024 last_inst = inst; 2034 last_inst = inst;
2025 if (praddr) 2035 if (praddr)
2026 printf("%.16lx %.8x", adr, inst); 2036 printf(REG" %.8x", adr, inst);
2027 printf("\t"); 2037 printf("\t");
2028 print_insn_powerpc(inst, adr, 0); /* always returns 4 */ 2038 print_insn_powerpc(inst, adr, 0); /* always returns 4 */
2029 printf("\n"); 2039 printf("\n");
@@ -2152,6 +2162,42 @@ memzcan(void)
2152 printf("%.8x\n", a - mskip); 2162 printf("%.8x\n", a - mskip);
2153} 2163}
2154 2164
2165void proccall(void)
2166{
2167 unsigned long args[8];
2168 unsigned long ret;
2169 int i;
2170 typedef unsigned long (*callfunc_t)(unsigned long, unsigned long,
2171 unsigned long, unsigned long, unsigned long,
2172 unsigned long, unsigned long, unsigned long);
2173 callfunc_t func;
2174
2175 if (!scanhex(&adrs))
2176 return;
2177 if (termch != '\n')
2178 termch = 0;
2179 for (i = 0; i < 8; ++i)
2180 args[i] = 0;
2181 for (i = 0; i < 8; ++i) {
2182 if (!scanhex(&args[i]) || termch == '\n')
2183 break;
2184 termch = 0;
2185 }
2186 func = (callfunc_t) adrs;
2187 ret = 0;
2188 if (setjmp(bus_error_jmp) == 0) {
2189 catch_memory_errors = 1;
2190 sync();
2191 ret = func(args[0], args[1], args[2], args[3],
2192 args[4], args[5], args[6], args[7]);
2193 sync();
2194 printf("return value is %x\n", ret);
2195 } else {
2196 printf("*** %x exception occurred\n", fault_except);
2197 }
2198 catch_memory_errors = 0;
2199}
2200
2155/* Input scanning routines */ 2201/* Input scanning routines */
2156int 2202int
2157skipbl(void) 2203skipbl(void)
@@ -2174,7 +2220,12 @@ static char *regnames[N_PTREGS] = {
2174 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", 2220 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
2175 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", 2221 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
2176 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", 2222 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
2177 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr", "softe", 2223 "pc", "msr", "or3", "ctr", "lr", "xer", "ccr",
2224#ifdef CONFIG_PPC64
2225 "softe",
2226#else
2227 "mq",
2228#endif
2178 "trap", "dar", "dsisr", "res" 2229 "trap", "dar", "dsisr", "res"
2179}; 2230};
2180 2231
@@ -2280,8 +2331,7 @@ scannl(void)
2280 c = inchar(); 2331 c = inchar();
2281} 2332}
2282 2333
2283int 2334int hexdigit(int c)
2284hexdigit(int c)
2285{ 2335{
2286 if( '0' <= c && c <= '9' ) 2336 if( '0' <= c && c <= '9' )
2287 return c - '0'; 2337 return c - '0';
@@ -2378,7 +2428,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
2378 const char *name = NULL; 2428 const char *name = NULL;
2379 unsigned long offset, size; 2429 unsigned long offset, size;
2380 2430
2381 printf("%.16lx", address); 2431 printf(REG, address);
2382 if (setjmp(bus_error_jmp) == 0) { 2432 if (setjmp(bus_error_jmp) == 0) {
2383 catch_memory_errors = 1; 2433 catch_memory_errors = 1;
2384 sync(); 2434 sync();
@@ -2399,55 +2449,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
2399 printf("%s", after); 2449 printf("%s", after);
2400} 2450}
2401 2451
2402static void debug_trace(void) 2452#ifdef CONFIG_PPC64
2403{
2404 unsigned long val, cmd, on;
2405
2406 cmd = skipbl();
2407 if (cmd == '\n') {
2408 /* show current state */
2409 unsigned long i;
2410 printf("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
2411 for (i = 0; i < PPCDBG_NUM_FLAGS ;i++) {
2412 on = PPCDBG_BITVAL(i) & ppc64_debug_switch;
2413 printf("%02x %s %12s ", i, on ? "on " : "off", trace_names[i] ? trace_names[i] : "");
2414 if (((i+1) % 3) == 0)
2415 printf("\n");
2416 }
2417 printf("\n");
2418 return;
2419 }
2420 while (cmd != '\n') {
2421 on = 1; /* default if no sign given */
2422 while (cmd == '+' || cmd == '-') {
2423 on = (cmd == '+');
2424 cmd = inchar();
2425 if (cmd == ' ' || cmd == '\n') { /* Turn on or off based on + or - */
2426 ppc64_debug_switch = on ? PPCDBG_ALL:PPCDBG_NONE;
2427 printf("Setting all values to %s...\n", on ? "on" : "off");
2428 if (cmd == '\n') return;
2429 else cmd = skipbl();
2430 }
2431 else
2432 termch = cmd;
2433 }
2434 termch = cmd; /* not +/- ... let scanhex see it */
2435 scanhex((void *)&val);
2436 if (val >= 64) {
2437 printf("Value %x out of range:\n", val);
2438 return;
2439 }
2440 if (on) {
2441 ppc64_debug_switch |= PPCDBG_BITVAL(val);
2442 printf("enable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
2443 } else {
2444 ppc64_debug_switch &= ~PPCDBG_BITVAL(val);
2445 printf("disable debug %x %s\n", val, trace_names[val] ? trace_names[val] : "");
2446 }
2447 cmd = skipbl();
2448 }
2449}
2450
2451static void dump_slb(void) 2453static void dump_slb(void)
2452{ 2454{
2453 int i; 2455 int i;
@@ -2484,6 +2486,27 @@ static void dump_stab(void)
2484 } 2486 }
2485} 2487}
2486 2488
2489void dump_segments(void)
2490{
2491 if (cpu_has_feature(CPU_FTR_SLB))
2492 dump_slb();
2493 else
2494 dump_stab();
2495}
2496#endif
2497
2498#ifdef CONFIG_PPC_STD_MMU_32
2499void dump_segments(void)
2500{
2501 int i;
2502
2503 printf("sr0-15 =");
2504 for (i = 0; i < 16; ++i)
2505 printf(" %x", mfsrin(i));
2506 printf("\n");
2507}
2508#endif
2509
2487void xmon_init(int enable) 2510void xmon_init(int enable)
2488{ 2511{
2489 if (enable) { 2512 if (enable) {
@@ -2504,11 +2527,3 @@ void xmon_init(int enable)
2504 __debugger_fault_handler = NULL; 2527 __debugger_fault_handler = NULL;
2505 } 2528 }
2506} 2529}
2507
2508void dump_segments(void)
2509{
2510 if (cpu_has_feature(CPU_FTR_SLB))
2511 dump_slb();
2512 else
2513 dump_stab();
2514}
diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
index 11726e2a4ec8..b42789f8eb76 100644
--- a/arch/ppc/8xx_io/commproc.c
+++ b/arch/ppc/8xx_io/commproc.c
@@ -73,7 +73,7 @@ cpm_mask_irq(unsigned int irq)
73{ 73{
74 int cpm_vec = irq - CPM_IRQ_OFFSET; 74 int cpm_vec = irq - CPM_IRQ_OFFSET;
75 75
76 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr &= ~(1 << cpm_vec); 76 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr) & ~(1 << cpm_vec));
77} 77}
78 78
79static void 79static void
@@ -81,7 +81,7 @@ cpm_unmask_irq(unsigned int irq)
81{ 81{
82 int cpm_vec = irq - CPM_IRQ_OFFSET; 82 int cpm_vec = irq - CPM_IRQ_OFFSET;
83 83
84 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr |= (1 << cpm_vec); 84 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr) | (1 << cpm_vec));
85} 85}
86 86
87static void 87static void
@@ -95,7 +95,7 @@ cpm_eoi(unsigned int irq)
95{ 95{
96 int cpm_vec = irq - CPM_IRQ_OFFSET; 96 int cpm_vec = irq - CPM_IRQ_OFFSET;
97 97
98 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr = (1 << cpm_vec); 98 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr, (1 << cpm_vec));
99} 99}
100 100
101struct hw_interrupt_type cpm_pic = { 101struct hw_interrupt_type cpm_pic = {
@@ -133,7 +133,7 @@ m8xx_cpm_reset(void)
133 * manual recommends it. 133 * manual recommends it.
134 * Bit 25, FAM can also be set to use FEC aggressive mode (860T). 134 * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
135 */ 135 */
136 imp->im_siu_conf.sc_sdcr = 1; 136 out_be32(&imp->im_siu_conf.sc_sdcr, 1),
137 137
138 /* Reclaim the DP memory for our use. */ 138 /* Reclaim the DP memory for our use. */
139 m8xx_cpm_dpinit(); 139 m8xx_cpm_dpinit();
@@ -178,10 +178,10 @@ cpm_interrupt_init(void)
178 178
179 /* Initialize the CPM interrupt controller. 179 /* Initialize the CPM interrupt controller.
180 */ 180 */
181 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr = 181 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr,
182 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | 182 (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
183 ((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK; 183 ((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK);
184 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr = 0; 184 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, 0);
185 185
186 /* install the CPM interrupt controller routines for the CPM 186 /* install the CPM interrupt controller routines for the CPM
187 * interrupt vectors 187 * interrupt vectors
@@ -198,7 +198,7 @@ cpm_interrupt_init(void)
198 if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction)) 198 if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction))
199 panic("Could not allocate CPM error IRQ!"); 199 panic("Could not allocate CPM error IRQ!");
200 200
201 ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr |= CICR_IEN; 201 out_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr, in_be32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr) | CICR_IEN);
202} 202}
203 203
204/* 204/*
@@ -212,8 +212,8 @@ cpm_get_irq(struct pt_regs *regs)
212 /* Get the vector by setting the ACK bit and then reading 212 /* Get the vector by setting the ACK bit and then reading
213 * the register. 213 * the register.
214 */ 214 */
215 ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr = 1; 215 out_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr, 1);
216 cpm_vec = ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr; 216 cpm_vec = in_be16(&((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr);
217 cpm_vec >>= 11; 217 cpm_vec >>= 11;
218 218
219 return cpm_vec; 219 return cpm_vec;
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 776941c75672..114b90fdea24 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -568,6 +568,7 @@ config CHESTNUT
568 568
569config SPRUCE 569config SPRUCE
570 bool "IBM-Spruce" 570 bool "IBM-Spruce"
571 select PPC_INDIRECT_PCI
571 572
572config HDPU 573config HDPU
573 bool "Sky-HDPU" 574 bool "Sky-HDPU"
@@ -588,27 +589,35 @@ config EV64260
588 589
589config LOPEC 590config LOPEC
590 bool "Motorola-LoPEC" 591 bool "Motorola-LoPEC"
592 select PPC_I8259
591 593
592config MVME5100 594config MVME5100
593 bool "Motorola-MVME5100" 595 bool "Motorola-MVME5100"
596 select PPC_INDIRECT_PCI
594 597
595config PPLUS 598config PPLUS
596 bool "Motorola-PowerPlus" 599 bool "Motorola-PowerPlus"
600 select PPC_I8259
601 select PPC_INDIRECT_PCI
597 602
598config PRPMC750 603config PRPMC750
599 bool "Motorola-PrPMC750" 604 bool "Motorola-PrPMC750"
605 select PPC_INDIRECT_PCI
600 606
601config PRPMC800 607config PRPMC800
602 bool "Motorola-PrPMC800" 608 bool "Motorola-PrPMC800"
609 select PPC_INDIRECT_PCI
603 610
604config SANDPOINT 611config SANDPOINT
605 bool "Motorola-Sandpoint" 612 bool "Motorola-Sandpoint"
613 select PPC_I8259
606 help 614 help
607 Select SANDPOINT if configuring for a Motorola Sandpoint X3 615 Select SANDPOINT if configuring for a Motorola Sandpoint X3
608 (any flavor). 616 (any flavor).
609 617
610config RADSTONE_PPC7D 618config RADSTONE_PPC7D
611 bool "Radstone Technology PPC7D board" 619 bool "Radstone Technology PPC7D board"
620 select PPC_I8259
612 621
613config PAL4 622config PAL4
614 bool "SBS-Palomar4" 623 bool "SBS-Palomar4"
@@ -616,6 +625,7 @@ config PAL4
616config GEMINI 625config GEMINI
617 bool "Synergy-Gemini" 626 bool "Synergy-Gemini"
618 depends on BROKEN 627 depends on BROKEN
628 select PPC_INDIRECT_PCI
619 help 629 help
620 Select Gemini if configuring for a Synergy Microsystems' Gemini 630 Select Gemini if configuring for a Synergy Microsystems' Gemini
621 series Single Board Computer. More information is available at: 631 series Single Board Computer. More information is available at:
@@ -747,13 +757,16 @@ config CPM2
747 on it (826x, 827x, 8560). 757 on it (826x, 827x, 8560).
748 758
749config PPC_CHRP 759config PPC_CHRP
750 bool 760 bool " Common Hardware Reference Platform (CHRP) based machines"
751 depends on PPC_MULTIPLATFORM 761 depends on PPC_MULTIPLATFORM
762 select PPC_I8259
763 select PPC_INDIRECT_PCI
752 default y 764 default y
753 765
754config PPC_PMAC 766config PPC_PMAC
755 bool 767 bool " Apple PowerMac based machines"
756 depends on PPC_MULTIPLATFORM 768 depends on PPC_MULTIPLATFORM
769 select PPC_INDIRECT_PCI
757 default y 770 default y
758 771
759config PPC_PMAC64 772config PPC_PMAC64
@@ -762,8 +775,10 @@ config PPC_PMAC64
762 default y 775 default y
763 776
764config PPC_PREP 777config PPC_PREP
765 bool 778 bool " PowerPC Reference Platform (PReP) based machines"
766 depends on PPC_MULTIPLATFORM 779 depends on PPC_MULTIPLATFORM
780 select PPC_I8259
781 select PPC_INDIRECT_PCI
767 default y 782 default y
768 783
769config PPC_OF 784config PPC_OF
@@ -797,6 +812,7 @@ config MV64360 # Really MV64360 & MV64460
797config MV64X60 812config MV64X60
798 bool 813 bool
799 depends on (GT64260 || MV64360) 814 depends on (GT64260 || MV64360)
815 select PPC_INDIRECT_PCI
800 default y 816 default y
801 817
802menu "Set bridge options" 818menu "Set bridge options"
@@ -845,6 +861,7 @@ config EPIC_SERIAL_MODE
845config MPC10X_BRIDGE 861config MPC10X_BRIDGE
846 bool 862 bool
847 depends on POWERPMC250 || LOPEC || SANDPOINT 863 depends on POWERPMC250 || LOPEC || SANDPOINT
864 select PPC_INDIRECT_PCI
848 default y 865 default y
849 866
850config MPC10X_OPENPIC 867config MPC10X_OPENPIC
@@ -870,6 +887,7 @@ config HARRIER_STORE_GATHERING
870config MVME5100_IPMC761_PRESENT 887config MVME5100_IPMC761_PRESENT
871 bool "MVME5100 configured with an IPMC761" 888 bool "MVME5100 configured with an IPMC761"
872 depends on MVME5100 889 depends on MVME5100
890 select PPC_I8259
873 891
874config SPRUCE_BAUD_33M 892config SPRUCE_BAUD_33M
875 bool "Spruce baud clock support" 893 bool "Spruce baud clock support"
@@ -1127,6 +1145,7 @@ menu "Bus options"
1127config ISA 1145config ISA
1128 bool "Support for ISA-bus hardware" 1146 bool "Support for ISA-bus hardware"
1129 depends on PPC_PREP || PPC_CHRP 1147 depends on PPC_PREP || PPC_CHRP
1148 select PPC_I8259
1130 help 1149 help
1131 Find out whether you have ISA slots on your motherboard. ISA is the 1150 Find out whether you have ISA slots on your motherboard. ISA is the
1132 name of a bus system, i.e. the way the CPU talks to the other stuff 1151 name of a bus system, i.e. the way the CPU talks to the other stuff
@@ -1139,6 +1158,17 @@ config GENERIC_ISA_DMA
1139 depends on POWER3 || POWER4 || 6xx && !CPM2 1158 depends on POWER3 || POWER4 || 6xx && !CPM2
1140 default y 1159 default y
1141 1160
1161config PPC_I8259
1162 bool
1163 default y if 85xx
1164 default n
1165
1166config PPC_INDIRECT_PCI
1167 bool
1168 depends on PCI
1169 default y if 40x || 44x || 85xx || 83xx
1170 default n
1171
1142config EISA 1172config EISA
1143 bool 1173 bool
1144 help 1174 help
@@ -1175,6 +1205,7 @@ config MPC83xx_PCI2
1175config PCI_QSPAN 1205config PCI_QSPAN
1176 bool "QSpan PCI" 1206 bool "QSpan PCI"
1177 depends on !4xx && !CPM2 && 8xx 1207 depends on !4xx && !CPM2 && 8xx
1208 select PPC_I8259
1178 help 1209 help
1179 Say Y here if you have a system based on a Motorola 8xx-series 1210 Say Y here if you have a system based on a Motorola 8xx-series
1180 embedded processor with a QSPAN PCI interface, otherwise say N. 1211 embedded processor with a QSPAN PCI interface, otherwise say N.
@@ -1182,6 +1213,7 @@ config PCI_QSPAN
1182config PCI_8260 1213config PCI_8260
1183 bool 1214 bool
1184 depends on PCI && 8260 1215 depends on PCI && 8260
1216 select PPC_INDIRECT_PCI
1185 default y 1217 default y
1186 1218
1187config 8260_PCI9 1219config 8260_PCI9
@@ -1368,7 +1400,7 @@ endmenu
1368 1400
1369source "lib/Kconfig" 1401source "lib/Kconfig"
1370 1402
1371source "arch/ppc/oprofile/Kconfig" 1403source "arch/powerpc/oprofile/Kconfig"
1372 1404
1373source "arch/ppc/Kconfig.debug" 1405source "arch/ppc/Kconfig.debug"
1374 1406
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 16e2675f3270..94d5716fa7c3 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -26,6 +26,10 @@ CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
26AFLAGS += -Iarch/$(ARCH) 26AFLAGS += -Iarch/$(ARCH)
27CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \ 27CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe \
28 -ffixed-r2 -mmultiple 28 -ffixed-r2 -mmultiple
29
30# No AltiVec instruction when building kernel
31CFLAGS += $(call cc-option, -mno-altivec)
32
29CPP = $(CC) -E $(CFLAGS) 33CPP = $(CC) -E $(CFLAGS)
30# Temporary hack until we have migrated to asm-powerpc 34# Temporary hack until we have migrated to asm-powerpc
31LINUXINCLUDE += -Iarch/$(ARCH)/include 35LINUXINCLUDE += -Iarch/$(ARCH)/include
@@ -57,10 +61,12 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o
57 61
58head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o 62head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o
59head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o 63head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o
60head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o 64head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
61 65
62core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ 66core-y += arch/ppc/kernel/ arch/powerpc/kernel/ \
63 arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ 67 arch/ppc/platforms/ \
68 arch/ppc/mm/ arch/ppc/lib/ \
69 arch/ppc/syslib/ arch/powerpc/sysdev/
64core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/ 70core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
65core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/ 71core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
66core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/ 72core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
@@ -71,7 +77,7 @@ drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
71drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/ 77drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
72drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/ 78drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
73 79
74drivers-$(CONFIG_OPROFILE) += arch/ppc/oprofile/ 80drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
75 81
76BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm 82BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
77 83
diff --git a/arch/ppc/boot/of1275/claim.c b/arch/ppc/boot/of1275/claim.c
index e060292ae2a7..13169a5c4339 100644
--- a/arch/ppc/boot/of1275/claim.c
+++ b/arch/ppc/boot/of1275/claim.c
@@ -29,6 +29,7 @@ claim(unsigned int virt, unsigned int size, unsigned int align)
29 args.virt = virt; 29 args.virt = virt;
30 args.size = size; 30 args.size = size;
31 args.align = align; 31 args.align = align;
32 args.ret = (void *) 0;
32 (*of_prom_entry)(&args); 33 (*of_prom_entry)(&args);
33 return args.ret; 34 return args.ret;
34} 35}
diff --git a/arch/ppc/boot/openfirmware/chrpmain.c b/arch/ppc/boot/openfirmware/chrpmain.c
index effe4a0624b0..245dbd9fc120 100644
--- a/arch/ppc/boot/openfirmware/chrpmain.c
+++ b/arch/ppc/boot/openfirmware/chrpmain.c
@@ -78,7 +78,7 @@ boot(int a1, int a2, void *prom)
78 begin_avail = avail_high = avail_ram; 78 begin_avail = avail_high = avail_ram;
79 end_avail = scratch + sizeof(scratch); 79 end_avail = scratch + sizeof(scratch);
80 printf("gunzipping (0x%p <- 0x%p:0x%p)...", dst, im, im+len); 80 printf("gunzipping (0x%p <- 0x%p:0x%p)...", dst, im, im+len);
81 gunzip(dst, 0x400000, im, &len); 81 gunzip(dst, PROG_SIZE - PROG_START, im, &len);
82 printf("done %u bytes\n\r", len); 82 printf("done %u bytes\n\r", len);
83 printf("%u bytes of heap consumed, max in use %u\n\r", 83 printf("%u bytes of heap consumed, max in use %u\n\r",
84 avail_high - begin_avail, heap_max); 84 avail_high - begin_avail, heap_max);
diff --git a/arch/ppc/boot/openfirmware/coffmain.c b/arch/ppc/boot/openfirmware/coffmain.c
index 04ba9d57e110..2da8855e2be0 100644
--- a/arch/ppc/boot/openfirmware/coffmain.c
+++ b/arch/ppc/boot/openfirmware/coffmain.c
@@ -38,7 +38,7 @@ static char heap[SCRATCH_SIZE];
38static unsigned long ram_start = 0; 38static unsigned long ram_start = 0;
39static unsigned long ram_end = 0x1000000; 39static unsigned long ram_end = 0x1000000;
40 40
41static unsigned long prog_start = 0x900000; 41static unsigned long prog_start = 0x800000;
42static unsigned long prog_size = 0x700000; 42static unsigned long prog_size = 0x700000;
43 43
44typedef void (*kernel_start_t)(int, int, void *); 44typedef void (*kernel_start_t)(int, int, void *);
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index b1457a8a9c0f..b35346df1e37 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -1,6 +1,7 @@
1# 1#
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4ifneq ($(CONFIG_PPC_MERGE),y)
4 5
5extra-$(CONFIG_PPC_STD_MMU) := head.o 6extra-$(CONFIG_PPC_STD_MMU) := head.o
6extra-$(CONFIG_40x) := head_4xx.o 7extra-$(CONFIG_40x) := head_4xx.o
@@ -9,13 +10,12 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
9extra-$(CONFIG_8xx) := head_8xx.o 10extra-$(CONFIG_8xx) := head_8xx.o
10extra-$(CONFIG_6xx) += idle_6xx.o 11extra-$(CONFIG_6xx) += idle_6xx.o
11extra-$(CONFIG_POWER4) += idle_power4.o 12extra-$(CONFIG_POWER4) += idle_power4.o
12extra-$(CONFIG_PPC_FPU) += fpu.o
13extra-y += vmlinux.lds 13extra-y += vmlinux.lds
14 14
15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ 15obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
16 process.o signal.o ptrace.o align.o \ 16 process.o align.o \
17 semaphore.o syscalls.o setup.o \ 17 setup.o \
18 cputable.o ppc_htab.o perfmon.o 18 ppc_htab.o
19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o 19obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o 20obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
21obj-$(CONFIG_POWER4) += cpu_setup_power4.o 21obj-$(CONFIG_POWER4) += cpu_setup_power4.o
@@ -25,7 +25,6 @@ obj-$(CONFIG_PCI) += pci.o
25obj-$(CONFIG_KGDB) += ppc-stub.o 25obj-$(CONFIG_KGDB) += ppc-stub.o
26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o 26obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
27obj-$(CONFIG_TAU) += temp.o 27obj-$(CONFIG_TAU) += temp.o
28obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
29ifndef CONFIG_E200 28ifndef CONFIG_E200
30obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o 29obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
31endif 30endif
@@ -35,3 +34,21 @@ ifndef CONFIG_MATH_EMULATION
35obj-$(CONFIG_8xx) += softemu8xx.o 34obj-$(CONFIG_8xx) += softemu8xx.o
36endif 35endif
37 36
37# These are here while we do the architecture merge
38
39else
40obj-y := irq.o idle.o \
41 align.o
42obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
43obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
44obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
46obj-$(CONFIG_PCI) += pci.o
47obj-$(CONFIG_KGDB) += ppc-stub.o
48obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
49obj-$(CONFIG_TAU) += temp.o
50ifndef CONFIG_E200
51obj-$(CONFIG_FSL_BOOKE) += perfmon_fsl_booke.o
52endif
53obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
54endif
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index ff81da9598d8..ab398c4b70b6 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -375,7 +375,7 @@ fix_alignment(struct pt_regs *regs)
375#ifdef CONFIG_PPC_FPU 375#ifdef CONFIG_PPC_FPU
376 preempt_disable(); 376 preempt_disable();
377 enable_kernel_fp(); 377 enable_kernel_fp();
378 cvt_fd(&data.f, &data.d, &current->thread.fpscr); 378 cvt_fd(&data.f, &data.d, &current->thread);
379 preempt_enable(); 379 preempt_enable();
380#else 380#else
381 return 0; 381 return 0;
@@ -385,7 +385,7 @@ fix_alignment(struct pt_regs *regs)
385#ifdef CONFIG_PPC_FPU 385#ifdef CONFIG_PPC_FPU
386 preempt_disable(); 386 preempt_disable();
387 enable_kernel_fp(); 387 enable_kernel_fp();
388 cvt_df(&data.d, &data.f, &current->thread.fpscr); 388 cvt_df(&data.d, &data.f, &current->thread);
389 preempt_enable(); 389 preempt_enable();
390#else 390#else
391 return 0; 391 return 0;
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c
index d9ad1d776d0e..968261d69572 100644
--- a/arch/ppc/kernel/asm-offsets.c
+++ b/arch/ppc/kernel/asm-offsets.c
@@ -130,10 +130,10 @@ main(void)
130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 130 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 131 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
132 132
133 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
133 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 134 DEFINE(TI_TASK, offsetof(struct thread_info, task));
134 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); 135 DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
135 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 136 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
136 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 137 DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 138 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
139 139
@@ -141,6 +141,7 @@ main(void)
141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address)); 141 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
142 DEFINE(pbe_next, offsetof(struct pbe, next)); 142 DEFINE(pbe_next, offsetof(struct pbe, next));
143 143
144 DEFINE(TASK_SIZE, TASK_SIZE);
144 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); 145 DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
145 return 0; 146 return 0;
146} 147}
diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S
index ba396438ede3..55ed7716636f 100644
--- a/arch/ppc/kernel/cpu_setup_6xx.S
+++ b/arch/ppc/kernel/cpu_setup_6xx.S
@@ -17,8 +17,6 @@
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include <asm/cache.h> 18#include <asm/cache.h>
19 19
20_GLOBAL(__setup_cpu_601)
21 blr
22_GLOBAL(__setup_cpu_603) 20_GLOBAL(__setup_cpu_603)
23 b setup_common_caches 21 b setup_common_caches
24_GLOBAL(__setup_cpu_604) 22_GLOBAL(__setup_cpu_604)
@@ -292,10 +290,10 @@ _GLOBAL(__init_fpu_registers)
292#define CS_SIZE 32 290#define CS_SIZE 32
293 291
294 .data 292 .data
295 .balign L1_CACHE_LINE_SIZE 293 .balign L1_CACHE_BYTES
296cpu_state_storage: 294cpu_state_storage:
297 .space CS_SIZE 295 .space CS_SIZE
298 .balign L1_CACHE_LINE_SIZE,0 296 .balign L1_CACHE_BYTES,0
299 .text 297 .text
300 298
301/* Called in normal context to backup CPU 0 state. This 299/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S
index 7e4fbb653724..d7bfd60e21fc 100644
--- a/arch/ppc/kernel/cpu_setup_power4.S
+++ b/arch/ppc/kernel/cpu_setup_power4.S
@@ -63,8 +63,6 @@ _GLOBAL(__970_cpu_preinit)
63 isync 63 isync
64 blr 64 blr
65 65
66_GLOBAL(__setup_cpu_power4)
67 blr
68_GLOBAL(__setup_cpu_ppc970) 66_GLOBAL(__setup_cpu_ppc970)
69 mfspr r0,SPRN_HID0 67 mfspr r0,SPRN_HID0
70 li r11,5 /* clear DOZE and SLEEP */ 68 li r11,5 /* clear DOZE and SLEEP */
@@ -88,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970)
88#define CS_SIZE 32 86#define CS_SIZE 32
89 87
90 .data 88 .data
91 .balign L1_CACHE_LINE_SIZE 89 .balign L1_CACHE_BYTES
92cpu_state_storage: 90cpu_state_storage:
93 .space CS_SIZE 91 .space CS_SIZE
94 .balign L1_CACHE_LINE_SIZE,0 92 .balign L1_CACHE_BYTES,0
95 .text 93 .text
96 94
97/* Called in normal context to backup CPU 0 state. This 95/* Called in normal context to backup CPU 0 state. This
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 03d4886869f3..f044edbb454f 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -200,9 +200,8 @@ _GLOBAL(DoSyscall)
200 bl do_show_syscall 200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */ 201#endif /* SHOW_SYSCALLS */
202 rlwinm r10,r1,0,0,18 /* current_thread_info() */ 202 rlwinm r10,r1,0,0,18 /* current_thread_info() */
203 lwz r11,TI_LOCAL_FLAGS(r10) 203 li r11,0
204 rlwinm r11,r11,0,~_TIFL_FORCE_NOERROR 204 stb r11,TI_SC_NOERR(r10)
205 stw r11,TI_LOCAL_FLAGS(r10)
206 lwz r11,TI_FLAGS(r10) 205 lwz r11,TI_FLAGS(r10)
207 andi. r11,r11,_TIF_SYSCALL_T_OR_A 206 andi. r11,r11,_TIF_SYSCALL_T_OR_A
208 bne- syscall_dotrace 207 bne- syscall_dotrace
@@ -227,8 +226,8 @@ ret_from_syscall:
227 cmplw 0,r3,r11 226 cmplw 0,r3,r11
228 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 227 rlwinm r12,r1,0,0,18 /* current_thread_info() */
229 blt+ 30f 228 blt+ 30f
230 lwz r11,TI_LOCAL_FLAGS(r12) 229 lbz r11,TI_SC_NOERR(r12)
231 andi. r11,r11,_TIFL_FORCE_NOERROR 230 cmpwi r11,0
232 bne 30f 231 bne 30f
233 neg r3,r3 232 neg r3,r3
234 lwz r10,_CCR(r1) /* Set SO bit in CR */ 233 lwz r10,_CCR(r1) /* Set SO bit in CR */
@@ -633,7 +632,8 @@ sigreturn_exit:
633 rlwinm r12,r1,0,0,18 /* current_thread_info() */ 632 rlwinm r12,r1,0,0,18 /* current_thread_info() */
634 lwz r9,TI_FLAGS(r12) 633 lwz r9,TI_FLAGS(r12)
635 andi. r0,r9,_TIF_SYSCALL_T_OR_A 634 andi. r0,r9,_TIF_SYSCALL_T_OR_A
636 bnel- do_syscall_trace_leave 635 beq+ ret_from_except_full
636 bl do_syscall_trace_leave
637 /* fall through */ 637 /* fall through */
638 638
639 .globl ret_from_except_full 639 .globl ret_from_except_full
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 1960fb8c259c..c5a890dca9cf 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -349,12 +349,12 @@ i##n: \
349 349
350/* System reset */ 350/* System reset */
351/* core99 pmac starts the seconary here by changing the vector, and 351/* core99 pmac starts the seconary here by changing the vector, and
352 putting it back to what it was (UnknownException) when done. */ 352 putting it back to what it was (unknown_exception) when done. */
353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP) 353#if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
354 . = 0x100 354 . = 0x100
355 b __secondary_start_gemini 355 b __secondary_start_gemini
356#else 356#else
357 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 357 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
358#endif 358#endif
359 359
360/* Machine check */ 360/* Machine check */
@@ -389,7 +389,7 @@ i##n: \
389 cmpwi cr1,r4,0 389 cmpwi cr1,r4,0
390 bne cr1,1f 390 bne cr1,1f
391#endif 391#endif
392 EXC_XFER_STD(0x200, MachineCheckException) 392 EXC_XFER_STD(0x200, machine_check_exception)
393#ifdef CONFIG_PPC_CHRP 393#ifdef CONFIG_PPC_CHRP
3941: b machine_check_in_rtas 3941: b machine_check_in_rtas
395#endif 395#endif
@@ -456,10 +456,10 @@ Alignment:
456 mfspr r5,SPRN_DSISR 456 mfspr r5,SPRN_DSISR
457 stw r5,_DSISR(r11) 457 stw r5,_DSISR(r11)
458 addi r3,r1,STACK_FRAME_OVERHEAD 458 addi r3,r1,STACK_FRAME_OVERHEAD
459 EXC_XFER_EE(0x600, AlignmentException) 459 EXC_XFER_EE(0x600, alignment_exception)
460 460
461/* Program check exception */ 461/* Program check exception */
462 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 462 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
463 463
464/* Floating-point unavailable */ 464/* Floating-point unavailable */
465 . = 0x800 465 . = 0x800
@@ -467,13 +467,13 @@ FPUnavailable:
467 EXCEPTION_PROLOG 467 EXCEPTION_PROLOG
468 bne load_up_fpu /* if from user, just load it up */ 468 bne load_up_fpu /* if from user, just load it up */
469 addi r3,r1,STACK_FRAME_OVERHEAD 469 addi r3,r1,STACK_FRAME_OVERHEAD
470 EXC_XFER_EE_LITE(0x800, KernelFP) 470 EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
471 471
472/* Decrementer */ 472/* Decrementer */
473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 473 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
474 474
475 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 475 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
476 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 476 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
477 477
478/* System call */ 478/* System call */
479 . = 0xc00 479 . = 0xc00
@@ -482,8 +482,8 @@ SystemCall:
482 EXC_XFER_EE_LITE(0xc00, DoSyscall) 482 EXC_XFER_EE_LITE(0xc00, DoSyscall)
483 483
484/* Single step - not used on 601 */ 484/* Single step - not used on 601 */
485 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 485 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
486 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 486 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
487 487
488/* 488/*
489 * The Altivec unavailable trap is at 0x0f20. Foo. 489 * The Altivec unavailable trap is at 0x0f20. Foo.
@@ -502,7 +502,7 @@ SystemCall:
502Trap_0f: 502Trap_0f:
503 EXCEPTION_PROLOG 503 EXCEPTION_PROLOG
504 addi r3,r1,STACK_FRAME_OVERHEAD 504 addi r3,r1,STACK_FRAME_OVERHEAD
505 EXC_XFER_EE(0xf00, UnknownException) 505 EXC_XFER_EE(0xf00, unknown_exception)
506 506
507/* 507/*
508 * Handle TLB miss for instruction on 603/603e. 508 * Handle TLB miss for instruction on 603/603e.
@@ -702,44 +702,44 @@ DataStoreTLBMiss:
702 rfi 702 rfi
703 703
704#ifndef CONFIG_ALTIVEC 704#ifndef CONFIG_ALTIVEC
705#define AltivecAssistException UnknownException 705#define altivec_assist_exception unknown_exception
706#endif 706#endif
707 707
708 EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, EXC_XFER_EE) 708 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE) 709 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
710 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
711#ifdef CONFIG_POWER4 711#ifdef CONFIG_POWER4
712 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 712 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
713 EXCEPTION(0x1700, Trap_17, AltivecAssistException, EXC_XFER_EE) 713 EXCEPTION(0x1700, Trap_17, altivec_assist_exception, EXC_XFER_EE)
714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD) 714 EXCEPTION(0x1800, Trap_18, TAUException, EXC_XFER_STD)
715#else /* !CONFIG_POWER4 */ 715#else /* !CONFIG_POWER4 */
716 EXCEPTION(0x1600, Trap_16, AltivecAssistException, EXC_XFER_EE) 716 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD) 717 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
718 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 718 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
719#endif /* CONFIG_POWER4 */ 719#endif /* CONFIG_POWER4 */
720 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 720 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
721 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 721 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
722 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 722 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
723 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 723 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
724 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 724 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
725 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 725 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
726 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 726 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE) 727 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
728 EXCEPTION(0x2100, Trap_21, UnknownException, EXC_XFER_EE) 728 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
729 EXCEPTION(0x2200, Trap_22, UnknownException, EXC_XFER_EE) 729 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
730 EXCEPTION(0x2300, Trap_23, UnknownException, EXC_XFER_EE) 730 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
731 EXCEPTION(0x2400, Trap_24, UnknownException, EXC_XFER_EE) 731 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
732 EXCEPTION(0x2500, Trap_25, UnknownException, EXC_XFER_EE) 732 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
733 EXCEPTION(0x2600, Trap_26, UnknownException, EXC_XFER_EE) 733 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
734 EXCEPTION(0x2700, Trap_27, UnknownException, EXC_XFER_EE) 734 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
735 EXCEPTION(0x2800, Trap_28, UnknownException, EXC_XFER_EE) 735 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
736 EXCEPTION(0x2900, Trap_29, UnknownException, EXC_XFER_EE) 736 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
737 EXCEPTION(0x2a00, Trap_2a, UnknownException, EXC_XFER_EE) 737 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
738 EXCEPTION(0x2b00, Trap_2b, UnknownException, EXC_XFER_EE) 738 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
739 EXCEPTION(0x2c00, Trap_2c, UnknownException, EXC_XFER_EE) 739 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
740 EXCEPTION(0x2d00, Trap_2d, UnknownException, EXC_XFER_EE) 740 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
741 EXCEPTION(0x2e00, Trap_2e, UnknownException, EXC_XFER_EE) 741 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
742 EXCEPTION(0x2f00, MOLTrampoline, UnknownException, EXC_XFER_EE_LITE) 742 EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)
743 743
744 .globl mol_trampoline 744 .globl mol_trampoline
745 .set mol_trampoline, i0x2f00 745 .set mol_trampoline, i0x2f00
@@ -751,7 +751,7 @@ AltiVecUnavailable:
751#ifdef CONFIG_ALTIVEC 751#ifdef CONFIG_ALTIVEC
752 bne load_up_altivec /* if from user, just load it up */ 752 bne load_up_altivec /* if from user, just load it up */
753#endif /* CONFIG_ALTIVEC */ 753#endif /* CONFIG_ALTIVEC */
754 EXC_XFER_EE_LITE(0xf20, AltivecUnavailException) 754 EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
755 755
756#ifdef CONFIG_PPC64BRIDGE 756#ifdef CONFIG_PPC64BRIDGE
757DataAccess: 757DataAccess:
@@ -767,12 +767,12 @@ DataSegment:
767 addi r3,r1,STACK_FRAME_OVERHEAD 767 addi r3,r1,STACK_FRAME_OVERHEAD
768 mfspr r4,SPRN_DAR 768 mfspr r4,SPRN_DAR
769 stw r4,_DAR(r11) 769 stw r4,_DAR(r11)
770 EXC_XFER_STD(0x380, UnknownException) 770 EXC_XFER_STD(0x380, unknown_exception)
771 771
772InstructionSegment: 772InstructionSegment:
773 EXCEPTION_PROLOG 773 EXCEPTION_PROLOG
774 addi r3,r1,STACK_FRAME_OVERHEAD 774 addi r3,r1,STACK_FRAME_OVERHEAD
775 EXC_XFER_STD(0x480, UnknownException) 775 EXC_XFER_STD(0x480, unknown_exception)
776#endif /* CONFIG_PPC64BRIDGE */ 776#endif /* CONFIG_PPC64BRIDGE */
777 777
778#ifdef CONFIG_ALTIVEC 778#ifdef CONFIG_ALTIVEC
@@ -804,7 +804,7 @@ load_up_altivec:
804 beq 1f 804 beq 1f
805 add r4,r4,r6 805 add r4,r4,r6
806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ 806 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
807 SAVE_32VR(0,r10,r4) 807 SAVE_32VRS(0,r10,r4)
808 mfvscr vr0 808 mfvscr vr0
809 li r10,THREAD_VSCR 809 li r10,THREAD_VSCR
810 stvx vr0,r10,r4 810 stvx vr0,r10,r4
@@ -824,7 +824,7 @@ load_up_altivec:
824 stw r4,THREAD_USED_VR(r5) 824 stw r4,THREAD_USED_VR(r5)
825 lvx vr0,r10,r5 825 lvx vr0,r10,r5
826 mtvscr vr0 826 mtvscr vr0
827 REST_32VR(0,r10,r5) 827 REST_32VRS(0,r10,r5)
828#ifndef CONFIG_SMP 828#ifndef CONFIG_SMP
829 subi r4,r5,THREAD 829 subi r4,r5,THREAD
830 sub r4,r4,r6 830 sub r4,r4,r6
@@ -870,7 +870,7 @@ giveup_altivec:
870 addi r3,r3,THREAD /* want THREAD of task */ 870 addi r3,r3,THREAD /* want THREAD of task */
871 lwz r5,PT_REGS(r3) 871 lwz r5,PT_REGS(r3)
872 cmpwi 0,r5,0 872 cmpwi 0,r5,0
873 SAVE_32VR(0, r4, r3) 873 SAVE_32VRS(0, r4, r3)
874 mfvscr vr0 874 mfvscr vr0
875 li r4,THREAD_VSCR 875 li r4,THREAD_VSCR
876 stvx vr0,r4,r3 876 stvx vr0,r4,r3
@@ -916,7 +916,7 @@ relocate_kernel:
916copy_and_flush: 916copy_and_flush:
917 addi r5,r5,-4 917 addi r5,r5,-4
918 addi r6,r6,-4 918 addi r6,r6,-4
9194: li r0,L1_CACHE_LINE_SIZE/4 9194: li r0,L1_CACHE_BYTES/4
920 mtctr r0 920 mtctr r0
9213: addi r6,r6,4 /* copy a cache line */ 9213: addi r6,r6,4 /* copy a cache line */
922 lwzx r0,r6,r4 922 lwzx r0,r6,r4
@@ -1059,7 +1059,6 @@ __secondary_start:
1059 1059
1060 lis r3,-KERNELBASE@h 1060 lis r3,-KERNELBASE@h
1061 mr r4,r24 1061 mr r4,r24
1062 bl identify_cpu
1063 bl call_setup_cpu /* Call setup_cpu for this CPU */ 1062 bl call_setup_cpu /* Call setup_cpu for this CPU */
1064#ifdef CONFIG_6xx 1063#ifdef CONFIG_6xx
1065 lis r3,-KERNELBASE@h 1064 lis r3,-KERNELBASE@h
@@ -1109,11 +1108,6 @@ __secondary_start:
1109 * Those generic dummy functions are kept for CPUs not 1108 * Those generic dummy functions are kept for CPUs not
1110 * included in CONFIG_6xx 1109 * included in CONFIG_6xx
1111 */ 1110 */
1112_GLOBAL(__setup_cpu_power3)
1113 blr
1114_GLOBAL(__setup_cpu_generic)
1115 blr
1116
1117#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4) 1111#if !defined(CONFIG_6xx) && !defined(CONFIG_POWER4)
1118_GLOBAL(__save_cpu_setup) 1112_GLOBAL(__save_cpu_setup)
1119 blr 1113 blr
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 599245b0407e..8b49679fad54 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -309,13 +309,13 @@ skpinv: addi r4,r4,1 /* Increment */
309 309
310interrupt_base: 310interrupt_base:
311 /* Critical Input Interrupt */ 311 /* Critical Input Interrupt */
312 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 312 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
313 313
314 /* Machine Check Interrupt */ 314 /* Machine Check Interrupt */
315#ifdef CONFIG_440A 315#ifdef CONFIG_440A
316 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 316 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
317#else 317#else
318 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 318 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
319#endif 319#endif
320 320
321 /* Data Storage Interrupt */ 321 /* Data Storage Interrupt */
@@ -442,7 +442,7 @@ interrupt_base:
442#ifdef CONFIG_PPC_FPU 442#ifdef CONFIG_PPC_FPU
443 FP_UNAVAILABLE_EXCEPTION 443 FP_UNAVAILABLE_EXCEPTION
444#else 444#else
445 EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 445 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
446#endif 446#endif
447 447
448 /* System Call Interrupt */ 448 /* System Call Interrupt */
@@ -451,21 +451,21 @@ interrupt_base:
451 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 451 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
452 452
453 /* Auxillary Processor Unavailable Interrupt */ 453 /* Auxillary Processor Unavailable Interrupt */
454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 454 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
455 455
456 /* Decrementer Interrupt */ 456 /* Decrementer Interrupt */
457 DECREMENTER_EXCEPTION 457 DECREMENTER_EXCEPTION
458 458
459 /* Fixed Internal Timer Interrupt */ 459 /* Fixed Internal Timer Interrupt */
460 /* TODO: Add FIT support */ 460 /* TODO: Add FIT support */
461 EXCEPTION(0x1010, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 461 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
462 462
463 /* Watchdog Timer Interrupt */ 463 /* Watchdog Timer Interrupt */
464 /* TODO: Add watchdog support */ 464 /* TODO: Add watchdog support */
465#ifdef CONFIG_BOOKE_WDT 465#ifdef CONFIG_BOOKE_WDT
466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) 466 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
467#else 467#else
468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, UnknownException) 468 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
469#endif 469#endif
470 470
471 /* Data TLB Error Interrupt */ 471 /* Data TLB Error Interrupt */
@@ -743,14 +743,18 @@ _GLOBAL(set_context)
743 * goes at the beginning of the data segment, which is page-aligned. 743 * goes at the beginning of the data segment, which is page-aligned.
744 */ 744 */
745 .data 745 .data
746_GLOBAL(sdata) 746 .align 12
747_GLOBAL(empty_zero_page) 747 .globl sdata
748sdata:
749 .globl empty_zero_page
750empty_zero_page:
748 .space 4096 751 .space 4096
749 752
750/* 753/*
751 * To support >32-bit physical addresses, we use an 8KB pgdir. 754 * To support >32-bit physical addresses, we use an 8KB pgdir.
752 */ 755 */
753_GLOBAL(swapper_pg_dir) 756 .globl swapper_pg_dir
757swapper_pg_dir:
754 .space 8192 758 .space 8192
755 759
756/* Reserved 4k for the critical exception stack & 4k for the machine 760/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -759,13 +763,15 @@ _GLOBAL(swapper_pg_dir)
759 .align 12 763 .align 12
760exception_stack_bottom: 764exception_stack_bottom:
761 .space BOOKE_EXCEPTION_STACK_SIZE 765 .space BOOKE_EXCEPTION_STACK_SIZE
762_GLOBAL(exception_stack_top) 766 .globl exception_stack_top
767exception_stack_top:
763 768
764/* 769/*
765 * This space gets a copy of optional info passed to us by the bootstrap 770 * This space gets a copy of optional info passed to us by the bootstrap
766 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 771 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
767 */ 772 */
768_GLOBAL(cmd_line) 773 .globl cmd_line
774cmd_line:
769 .space 512 775 .space 512
770 776
771/* 777/*
@@ -774,5 +780,3 @@ _GLOBAL(cmd_line)
774 */ 780 */
775abatron_pteptrs: 781abatron_pteptrs:
776 .space 8 782 .space 8
777
778
diff --git a/arch/ppc/kernel/head_4xx.S b/arch/ppc/kernel/head_4xx.S
index 8562b807b37c..10c261c67021 100644
--- a/arch/ppc/kernel/head_4xx.S
+++ b/arch/ppc/kernel/head_4xx.S
@@ -245,12 +245,12 @@ label:
245/* 245/*
246 * 0x0100 - Critical Interrupt Exception 246 * 0x0100 - Critical Interrupt Exception
247 */ 247 */
248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, UnknownException) 248 CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)
249 249
250/* 250/*
251 * 0x0200 - Machine Check Exception 251 * 0x0200 - Machine Check Exception
252 */ 252 */
253 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 253 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
254 254
255/* 255/*
256 * 0x0300 - Data Storage Exception 256 * 0x0300 - Data Storage Exception
@@ -405,7 +405,7 @@ label:
405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ 405 mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
406 stw r4,_DEAR(r11) 406 stw r4,_DEAR(r11)
407 addi r3,r1,STACK_FRAME_OVERHEAD 407 addi r3,r1,STACK_FRAME_OVERHEAD
408 EXC_XFER_EE(0x600, AlignmentException) 408 EXC_XFER_EE(0x600, alignment_exception)
409 409
410/* 0x0700 - Program Exception */ 410/* 0x0700 - Program Exception */
411 START_EXCEPTION(0x0700, ProgramCheck) 411 START_EXCEPTION(0x0700, ProgramCheck)
@@ -413,21 +413,21 @@ label:
413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */ 413 mfspr r4,SPRN_ESR /* Grab the ESR and save it */
414 stw r4,_ESR(r11) 414 stw r4,_ESR(r11)
415 addi r3,r1,STACK_FRAME_OVERHEAD 415 addi r3,r1,STACK_FRAME_OVERHEAD
416 EXC_XFER_STD(0x700, ProgramCheckException) 416 EXC_XFER_STD(0x700, program_check_exception)
417 417
418 EXCEPTION(0x0800, Trap_08, UnknownException, EXC_XFER_EE) 418 EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
419 EXCEPTION(0x0900, Trap_09, UnknownException, EXC_XFER_EE) 419 EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
420 EXCEPTION(0x0A00, Trap_0A, UnknownException, EXC_XFER_EE) 420 EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
421 EXCEPTION(0x0B00, Trap_0B, UnknownException, EXC_XFER_EE) 421 EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
422 422
423/* 0x0C00 - System Call Exception */ 423/* 0x0C00 - System Call Exception */
424 START_EXCEPTION(0x0C00, SystemCall) 424 START_EXCEPTION(0x0C00, SystemCall)
425 NORMAL_EXCEPTION_PROLOG 425 NORMAL_EXCEPTION_PROLOG
426 EXC_XFER_EE_LITE(0xc00, DoSyscall) 426 EXC_XFER_EE_LITE(0xc00, DoSyscall)
427 427
428 EXCEPTION(0x0D00, Trap_0D, UnknownException, EXC_XFER_EE) 428 EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
429 EXCEPTION(0x0E00, Trap_0E, UnknownException, EXC_XFER_EE) 429 EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
430 EXCEPTION(0x0F00, Trap_0F, UnknownException, EXC_XFER_EE) 430 EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
431 431
432/* 0x1000 - Programmable Interval Timer (PIT) Exception */ 432/* 0x1000 - Programmable Interval Timer (PIT) Exception */
433 START_EXCEPTION(0x1000, Decrementer) 433 START_EXCEPTION(0x1000, Decrementer)
@@ -444,14 +444,14 @@ label:
444 444
445/* 0x1010 - Fixed Interval Timer (FIT) Exception 445/* 0x1010 - Fixed Interval Timer (FIT) Exception
446*/ 446*/
447 STND_EXCEPTION(0x1010, FITException, UnknownException) 447 STND_EXCEPTION(0x1010, FITException, unknown_exception)
448 448
449/* 0x1020 - Watchdog Timer (WDT) Exception 449/* 0x1020 - Watchdog Timer (WDT) Exception
450*/ 450*/
451#ifdef CONFIG_BOOKE_WDT 451#ifdef CONFIG_BOOKE_WDT
452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException) 452 CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
453#else 453#else
454 CRITICAL_EXCEPTION(0x1020, WDTException, UnknownException) 454 CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
455#endif 455#endif
456#endif 456#endif
457 457
@@ -656,25 +656,25 @@ label:
656 mfspr r10, SPRN_SPRG0 656 mfspr r10, SPRN_SPRG0
657 b InstructionAccess 657 b InstructionAccess
658 658
659 EXCEPTION(0x1300, Trap_13, UnknownException, EXC_XFER_EE) 659 EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
660 EXCEPTION(0x1400, Trap_14, UnknownException, EXC_XFER_EE) 660 EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
661 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 661 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
662 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 662 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
663#ifdef CONFIG_IBM405_ERR51 663#ifdef CONFIG_IBM405_ERR51
664 /* 405GP errata 51 */ 664 /* 405GP errata 51 */
665 START_EXCEPTION(0x1700, Trap_17) 665 START_EXCEPTION(0x1700, Trap_17)
666 b DTLBMiss 666 b DTLBMiss
667#else 667#else
668 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 668 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
669#endif 669#endif
670 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 670 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
671 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 671 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
672 EXCEPTION(0x1A00, Trap_1A, UnknownException, EXC_XFER_EE) 672 EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
673 EXCEPTION(0x1B00, Trap_1B, UnknownException, EXC_XFER_EE) 673 EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
674 EXCEPTION(0x1C00, Trap_1C, UnknownException, EXC_XFER_EE) 674 EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
675 EXCEPTION(0x1D00, Trap_1D, UnknownException, EXC_XFER_EE) 675 EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
676 EXCEPTION(0x1E00, Trap_1E, UnknownException, EXC_XFER_EE) 676 EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
677 EXCEPTION(0x1F00, Trap_1F, UnknownException, EXC_XFER_EE) 677 EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
678 678
679/* Check for a single step debug exception while in an exception 679/* Check for a single step debug exception while in an exception
680 * handler before state has been saved. This is to catch the case 680 * handler before state has been saved. This is to catch the case
@@ -988,10 +988,14 @@ _GLOBAL(set_context)
988 * goes at the beginning of the data segment, which is page-aligned. 988 * goes at the beginning of the data segment, which is page-aligned.
989 */ 989 */
990 .data 990 .data
991_GLOBAL(sdata) 991 .align 12
992_GLOBAL(empty_zero_page) 992 .globl sdata
993sdata:
994 .globl empty_zero_page
995empty_zero_page:
993 .space 4096 996 .space 4096
994_GLOBAL(swapper_pg_dir) 997 .globl swapper_pg_dir
998swapper_pg_dir:
995 .space 4096 999 .space 4096
996 1000
997 1001
@@ -1001,12 +1005,14 @@ _GLOBAL(swapper_pg_dir)
1001exception_stack_bottom: 1005exception_stack_bottom:
1002 .space 4096 1006 .space 4096
1003critical_stack_top: 1007critical_stack_top:
1004_GLOBAL(exception_stack_top) 1008 .globl exception_stack_top
1009exception_stack_top:
1005 1010
1006/* This space gets a copy of optional info passed to us by the bootstrap 1011/* This space gets a copy of optional info passed to us by the bootstrap
1007 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1012 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1008 */ 1013 */
1009_GLOBAL(cmd_line) 1014 .globl cmd_line
1015cmd_line:
1010 .space 512 1016 .space 512
1011 1017
1012/* Room for two PTE pointers, usually the kernel and current user pointers 1018/* Room for two PTE pointers, usually the kernel and current user pointers
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index cb1a3a54a026..de0978742221 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -203,7 +203,7 @@ i##n: \
203 ret_from_except) 203 ret_from_except)
204 204
205/* System reset */ 205/* System reset */
206 EXCEPTION(0x100, Reset, UnknownException, EXC_XFER_STD) 206 EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
207 207
208/* Machine check */ 208/* Machine check */
209 . = 0x200 209 . = 0x200
@@ -214,7 +214,7 @@ MachineCheck:
214 mfspr r5,SPRN_DSISR 214 mfspr r5,SPRN_DSISR
215 stw r5,_DSISR(r11) 215 stw r5,_DSISR(r11)
216 addi r3,r1,STACK_FRAME_OVERHEAD 216 addi r3,r1,STACK_FRAME_OVERHEAD
217 EXC_XFER_STD(0x200, MachineCheckException) 217 EXC_XFER_STD(0x200, machine_check_exception)
218 218
219/* Data access exception. 219/* Data access exception.
220 * This is "never generated" by the MPC8xx. We jump to it for other 220 * This is "never generated" by the MPC8xx. We jump to it for other
@@ -252,20 +252,20 @@ Alignment:
252 mfspr r5,SPRN_DSISR 252 mfspr r5,SPRN_DSISR
253 stw r5,_DSISR(r11) 253 stw r5,_DSISR(r11)
254 addi r3,r1,STACK_FRAME_OVERHEAD 254 addi r3,r1,STACK_FRAME_OVERHEAD
255 EXC_XFER_EE(0x600, AlignmentException) 255 EXC_XFER_EE(0x600, alignment_exception)
256 256
257/* Program check exception */ 257/* Program check exception */
258 EXCEPTION(0x700, ProgramCheck, ProgramCheckException, EXC_XFER_STD) 258 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
259 259
260/* No FPU on MPC8xx. This exception is not supposed to happen. 260/* No FPU on MPC8xx. This exception is not supposed to happen.
261*/ 261*/
262 EXCEPTION(0x800, FPUnavailable, UnknownException, EXC_XFER_STD) 262 EXCEPTION(0x800, FPUnavailable, unknown_exception, EXC_XFER_STD)
263 263
264/* Decrementer */ 264/* Decrementer */
265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE) 265 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
266 266
267 EXCEPTION(0xa00, Trap_0a, UnknownException, EXC_XFER_EE) 267 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
268 EXCEPTION(0xb00, Trap_0b, UnknownException, EXC_XFER_EE) 268 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
269 269
270/* System call */ 270/* System call */
271 . = 0xc00 271 . = 0xc00
@@ -274,9 +274,9 @@ SystemCall:
274 EXC_XFER_EE_LITE(0xc00, DoSyscall) 274 EXC_XFER_EE_LITE(0xc00, DoSyscall)
275 275
276/* Single step - not used on 601 */ 276/* Single step - not used on 601 */
277 EXCEPTION(0xd00, SingleStep, SingleStepException, EXC_XFER_STD) 277 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
278 EXCEPTION(0xe00, Trap_0e, UnknownException, EXC_XFER_EE) 278 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
279 EXCEPTION(0xf00, Trap_0f, UnknownException, EXC_XFER_EE) 279 EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
280 280
281/* On the MPC8xx, this is a software emulation interrupt. It occurs 281/* On the MPC8xx, this is a software emulation interrupt. It occurs
282 * for all unimplemented and illegal instructions. 282 * for all unimplemented and illegal instructions.
@@ -540,22 +540,22 @@ DataTLBError:
540#endif 540#endif
541 b DataAccess 541 b DataAccess
542 542
543 EXCEPTION(0x1500, Trap_15, UnknownException, EXC_XFER_EE) 543 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
544 EXCEPTION(0x1600, Trap_16, UnknownException, EXC_XFER_EE) 544 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
545 EXCEPTION(0x1700, Trap_17, UnknownException, EXC_XFER_EE) 545 EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
546 EXCEPTION(0x1800, Trap_18, UnknownException, EXC_XFER_EE) 546 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
547 EXCEPTION(0x1900, Trap_19, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
548 EXCEPTION(0x1a00, Trap_1a, UnknownException, EXC_XFER_EE) 548 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
549 EXCEPTION(0x1b00, Trap_1b, UnknownException, EXC_XFER_EE) 549 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
550 550
551/* On the MPC8xx, these next four traps are used for development 551/* On the MPC8xx, these next four traps are used for development
552 * support of breakpoints and such. Someday I will get around to 552 * support of breakpoints and such. Someday I will get around to
553 * using them. 553 * using them.
554 */ 554 */
555 EXCEPTION(0x1c00, Trap_1c, UnknownException, EXC_XFER_EE) 555 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
556 EXCEPTION(0x1d00, Trap_1d, UnknownException, EXC_XFER_EE) 556 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
557 EXCEPTION(0x1e00, Trap_1e, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
558 EXCEPTION(0x1f00, Trap_1f, UnknownException, EXC_XFER_EE) 558 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
559 559
560 . = 0x2000 560 . = 0x2000
561 561
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index 9342acf12e72..aeb349b47af3 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -335,7 +335,7 @@ label:
335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ 335 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
336 stw r4,_DEAR(r11); \ 336 stw r4,_DEAR(r11); \
337 addi r3,r1,STACK_FRAME_OVERHEAD; \ 337 addi r3,r1,STACK_FRAME_OVERHEAD; \
338 EXC_XFER_EE(0x0600, AlignmentException) 338 EXC_XFER_EE(0x0600, alignment_exception)
339 339
340#define PROGRAM_EXCEPTION \ 340#define PROGRAM_EXCEPTION \
341 START_EXCEPTION(Program) \ 341 START_EXCEPTION(Program) \
@@ -343,7 +343,7 @@ label:
343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ 343 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
344 stw r4,_ESR(r11); \ 344 stw r4,_ESR(r11); \
345 addi r3,r1,STACK_FRAME_OVERHEAD; \ 345 addi r3,r1,STACK_FRAME_OVERHEAD; \
346 EXC_XFER_STD(0x0700, ProgramCheckException) 346 EXC_XFER_STD(0x0700, program_check_exception)
347 347
348#define DECREMENTER_EXCEPTION \ 348#define DECREMENTER_EXCEPTION \
349 START_EXCEPTION(Decrementer) \ 349 START_EXCEPTION(Decrementer) \
diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S
index 8e52e8408316..5063c603fad4 100644
--- a/arch/ppc/kernel/head_fsl_booke.S
+++ b/arch/ppc/kernel/head_fsl_booke.S
@@ -426,14 +426,14 @@ skpinv: addi r6,r6,1 /* Increment */
426 426
427interrupt_base: 427interrupt_base:
428 /* Critical Input Interrupt */ 428 /* Critical Input Interrupt */
429 CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) 429 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
430 430
431 /* Machine Check Interrupt */ 431 /* Machine Check Interrupt */
432#ifdef CONFIG_E200 432#ifdef CONFIG_E200
433 /* no RFMCI, MCSRRs on E200 */ 433 /* no RFMCI, MCSRRs on E200 */
434 CRITICAL_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 434 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
435#else 435#else
436 MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) 436 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
437#endif 437#endif
438 438
439 /* Data Storage Interrupt */ 439 /* Data Storage Interrupt */
@@ -542,9 +542,9 @@ interrupt_base:
542#else 542#else
543#ifdef CONFIG_E200 543#ifdef CONFIG_E200
544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */ 544 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
545 EXCEPTION(0x0800, FloatingPointUnavailable, ProgramCheckException, EXC_XFER_EE) 545 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
546#else 546#else
547 EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) 547 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
548#endif 548#endif
549#endif 549#endif
550 550
@@ -554,20 +554,20 @@ interrupt_base:
554 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 554 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
555 555
556 /* Auxillary Processor Unavailable Interrupt */ 556 /* Auxillary Processor Unavailable Interrupt */
557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) 557 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
558 558
559 /* Decrementer Interrupt */ 559 /* Decrementer Interrupt */
560 DECREMENTER_EXCEPTION 560 DECREMENTER_EXCEPTION
561 561
562 /* Fixed Internal Timer Interrupt */ 562 /* Fixed Internal Timer Interrupt */
563 /* TODO: Add FIT support */ 563 /* TODO: Add FIT support */
564 EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) 564 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
565 565
566 /* Watchdog Timer Interrupt */ 566 /* Watchdog Timer Interrupt */
567#ifdef CONFIG_BOOKE_WDT 567#ifdef CONFIG_BOOKE_WDT
568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) 568 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
569#else 569#else
570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) 570 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
571#endif 571#endif
572 572
573 /* Data TLB Error Interrupt */ 573 /* Data TLB Error Interrupt */
@@ -696,21 +696,21 @@ interrupt_base:
696 addi r3,r1,STACK_FRAME_OVERHEAD 696 addi r3,r1,STACK_FRAME_OVERHEAD
697 EXC_XFER_EE_LITE(0x2010, KernelSPE) 697 EXC_XFER_EE_LITE(0x2010, KernelSPE)
698#else 698#else
699 EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) 699 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
700#endif /* CONFIG_SPE */ 700#endif /* CONFIG_SPE */
701 701
702 /* SPE Floating Point Data */ 702 /* SPE Floating Point Data */
703#ifdef CONFIG_SPE 703#ifdef CONFIG_SPE
704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 704 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
705#else 705#else
706 EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) 706 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
707#endif /* CONFIG_SPE */ 707#endif /* CONFIG_SPE */
708 708
709 /* SPE Floating Point Round */ 709 /* SPE Floating Point Round */
710 EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) 710 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
711 711
712 /* Performance Monitor */ 712 /* Performance Monitor */
713 EXCEPTION(0x2060, PerformanceMonitor, PerformanceMonitorException, EXC_XFER_STD) 713 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
714 714
715 715
716 /* Debug Interrupt */ 716 /* Debug Interrupt */
@@ -853,7 +853,7 @@ load_up_spe:
853 cmpi 0,r4,0 853 cmpi 0,r4,0
854 beq 1f 854 beq 1f
855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ 855 addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
856 SAVE_32EVR(0,r10,r4) 856 SAVE_32EVRS(0,r10,r4)
857 evxor evr10, evr10, evr10 /* clear out evr10 */ 857 evxor evr10, evr10, evr10 /* clear out evr10 */
858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ 858 evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
859 li r5,THREAD_ACC 859 li r5,THREAD_ACC
@@ -873,7 +873,7 @@ load_up_spe:
873 stw r4,THREAD_USED_SPE(r5) 873 stw r4,THREAD_USED_SPE(r5)
874 evlddx evr4,r10,r5 874 evlddx evr4,r10,r5
875 evmra evr4,evr4 875 evmra evr4,evr4
876 REST_32EVR(0,r10,r5) 876 REST_32EVRS(0,r10,r5)
877#ifndef CONFIG_SMP 877#ifndef CONFIG_SMP
878 subi r4,r5,THREAD 878 subi r4,r5,THREAD
879 stw r4,last_task_used_spe@l(r3) 879 stw r4,last_task_used_spe@l(r3)
@@ -963,7 +963,7 @@ _GLOBAL(giveup_spe)
963 addi r3,r3,THREAD /* want THREAD of task */ 963 addi r3,r3,THREAD /* want THREAD of task */
964 lwz r5,PT_REGS(r3) 964 lwz r5,PT_REGS(r3)
965 cmpi 0,r5,0 965 cmpi 0,r5,0
966 SAVE_32EVR(0, r4, r3) 966 SAVE_32EVRS(0, r4, r3)
967 evxor evr6, evr6, evr6 /* clear out evr6 */ 967 evxor evr6, evr6, evr6 /* clear out evr6 */
968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ 968 evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */
969 li r4,THREAD_ACC 969 li r4,THREAD_ACC
@@ -1028,10 +1028,14 @@ _GLOBAL(set_context)
1028 * goes at the beginning of the data segment, which is page-aligned. 1028 * goes at the beginning of the data segment, which is page-aligned.
1029 */ 1029 */
1030 .data 1030 .data
1031_GLOBAL(sdata) 1031 .align 12
1032_GLOBAL(empty_zero_page) 1032 .globl sdata
1033sdata:
1034 .globl empty_zero_page
1035empty_zero_page:
1033 .space 4096 1036 .space 4096
1034_GLOBAL(swapper_pg_dir) 1037 .globl swapper_pg_dir
1038swapper_pg_dir:
1035 .space 4096 1039 .space 4096
1036 1040
1037/* Reserved 4k for the critical exception stack & 4k for the machine 1041/* Reserved 4k for the critical exception stack & 4k for the machine
@@ -1040,13 +1044,15 @@ _GLOBAL(swapper_pg_dir)
1040 .align 12 1044 .align 12
1041exception_stack_bottom: 1045exception_stack_bottom:
1042 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS 1046 .space BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
1043_GLOBAL(exception_stack_top) 1047 .globl exception_stack_top
1048exception_stack_top:
1044 1049
1045/* 1050/*
1046 * This space gets a copy of optional info passed to us by the bootstrap 1051 * This space gets a copy of optional info passed to us by the bootstrap
1047 * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 1052 * which is used to pass parameters into the kernel like root=/dev/sda1, etc.
1048 */ 1053 */
1049_GLOBAL(cmd_line) 1054 .globl cmd_line
1055cmd_line:
1050 .space 512 1056 .space 512
1051 1057
1052/* 1058/*
@@ -1055,4 +1061,3 @@ _GLOBAL(cmd_line)
1055 */ 1061 */
1056abatron_pteptrs: 1062abatron_pteptrs:
1057 .space 8 1063 .space 8
1058
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index fba29c876b62..11e5b44713f7 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -32,6 +32,7 @@
32#include <asm/cache.h> 32#include <asm/cache.h>
33#include <asm/cputable.h> 33#include <asm/cputable.h>
34#include <asm/machdep.h> 34#include <asm/machdep.h>
35#include <asm/smp.h>
35 36
36void default_idle(void) 37void default_idle(void)
37{ 38{
@@ -74,7 +75,7 @@ void cpu_idle(void)
74/* 75/*
75 * Register the sysctl to set/clear powersave_nap. 76 * Register the sysctl to set/clear powersave_nap.
76 */ 77 */
77extern unsigned long powersave_nap; 78extern int powersave_nap;
78 79
79static ctl_table powersave_nap_ctl_table[]={ 80static ctl_table powersave_nap_ctl_table[]={
80 { 81 {
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 8843f3af230f..772e428aaa59 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -57,6 +57,7 @@
57#include <asm/cache.h> 57#include <asm/cache.h>
58#include <asm/prom.h> 58#include <asm/prom.h>
59#include <asm/ptrace.h> 59#include <asm/ptrace.h>
60#include <asm/machdep.h>
60 61
61#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 62#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
62 63
diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S
index 861115249b35..d7f4e982b539 100644
--- a/arch/ppc/kernel/l2cr.S
+++ b/arch/ppc/kernel/l2cr.S
@@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
203 * L1 icache 203 * L1 icache
204 */ 204 */
205 b 20f 205 b 20f
206 .balign L1_CACHE_LINE_SIZE 206 .balign L1_CACHE_BYTES
20722: 20722:
208 sync 208 sync
209 mtspr SPRN_L2CR,r3 209 mtspr SPRN_L2CR,r3
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 90d917d2e856..3056ede2424d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -125,9 +125,8 @@ _GLOBAL(identify_cpu)
1251: 1251:
126 addis r6,r3,cur_cpu_spec@ha 126 addis r6,r3,cur_cpu_spec@ha
127 addi r6,r6,cur_cpu_spec@l 127 addi r6,r6,cur_cpu_spec@l
128 slwi r4,r4,2
129 sub r8,r8,r3 128 sub r8,r8,r3
130 stwx r8,r4,r6 129 stw r8,0(r6)
131 blr 130 blr
132 131
133/* 132/*
@@ -186,19 +185,18 @@ _GLOBAL(do_cpu_ftr_fixups)
186 * 185 *
187 * Setup function is called with: 186 * Setup function is called with:
188 * r3 = data offset 187 * r3 = data offset
189 * r4 = CPU number 188 * r4 = ptr to CPU spec (relocated)
190 * r5 = ptr to CPU spec (relocated)
191 */ 189 */
192_GLOBAL(call_setup_cpu) 190_GLOBAL(call_setup_cpu)
193 addis r5,r3,cur_cpu_spec@ha 191 addis r4,r3,cur_cpu_spec@ha
194 addi r5,r5,cur_cpu_spec@l 192 addi r4,r4,cur_cpu_spec@l
195 slwi r4,r24,2 193 lwz r4,0(r4)
196 lwzx r5,r4,r5 194 add r4,r4,r3
195 lwz r5,CPU_SPEC_SETUP(r4)
196 cmpi 0,r5,0
197 add r5,r5,r3 197 add r5,r5,r3
198 lwz r6,CPU_SPEC_SETUP(r5) 198 beqlr
199 add r6,r6,r3 199 mtctr r5
200 mtctr r6
201 mr r4,r24
202 bctr 200 bctr
203 201
204#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx) 202#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
@@ -273,134 +271,6 @@ _GLOBAL(low_choose_7447a_dfs)
273 271
274#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */ 272#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
275 273
276/* void local_save_flags_ptr(unsigned long *flags) */
277_GLOBAL(local_save_flags_ptr)
278 mfmsr r4
279 stw r4,0(r3)
280 blr
281 /*
282 * Need these nops here for taking over save/restore to
283 * handle lost intrs
284 * -- Cort
285 */
286 nop
287 nop
288 nop
289 nop
290 nop
291 nop
292 nop
293 nop
294 nop
295 nop
296 nop
297 nop
298 nop
299 nop
300 nop
301 nop
302 nop
303_GLOBAL(local_save_flags_ptr_end)
304
305/* void local_irq_restore(unsigned long flags) */
306_GLOBAL(local_irq_restore)
307/*
308 * Just set/clear the MSR_EE bit through restore/flags but do not
309 * change anything else. This is needed by the RT system and makes
310 * sense anyway.
311 * -- Cort
312 */
313 mfmsr r4
314 /* Copy all except the MSR_EE bit from r4 (current MSR value)
315 to r3. This is the sort of thing the rlwimi instruction is
316 designed for. -- paulus. */
317 rlwimi r3,r4,0,17,15
318 /* Check if things are setup the way we want _already_. */
319 cmpw 0,r3,r4
320 beqlr
3211: SYNC
322 mtmsr r3
323 SYNC
324 blr
325 nop
326 nop
327 nop
328 nop
329 nop
330 nop
331 nop
332 nop
333 nop
334 nop
335 nop
336 nop
337 nop
338 nop
339 nop
340 nop
341 nop
342 nop
343 nop
344_GLOBAL(local_irq_restore_end)
345
346_GLOBAL(local_irq_disable)
347 mfmsr r0 /* Get current interrupt state */
348 rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
349 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
350 SYNC /* Some chip revs have problems here... */
351 mtmsr r0 /* Update machine state */
352 blr /* Done */
353 /*
354 * Need these nops here for taking over save/restore to
355 * handle lost intrs
356 * -- Cort
357 */
358 nop
359 nop
360 nop
361 nop
362 nop
363 nop
364 nop
365 nop
366 nop
367 nop
368 nop
369 nop
370 nop
371 nop
372 nop
373_GLOBAL(local_irq_disable_end)
374
375_GLOBAL(local_irq_enable)
376 mfmsr r3 /* Get current state */
377 ori r3,r3,MSR_EE /* Turn on 'EE' bit */
378 SYNC /* Some chip revs have problems here... */
379 mtmsr r3 /* Update machine state */
380 blr
381 /*
382 * Need these nops here for taking over save/restore to
383 * handle lost intrs
384 * -- Cort
385 */
386 nop
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393 nop
394 nop
395 nop
396 nop
397 nop
398 nop
399 nop
400 nop
401 nop
402_GLOBAL(local_irq_enable_end)
403
404/* 274/*
405 * complement mask on the msr then "or" some values on. 275 * complement mask on the msr then "or" some values on.
406 * _nmask_and_or_msr(nmask, value_to_or) 276 * _nmask_and_or_msr(nmask, value_to_or)
@@ -628,21 +498,21 @@ _GLOBAL(flush_icache_range)
628BEGIN_FTR_SECTION 498BEGIN_FTR_SECTION
629 blr /* for 601, do nothing */ 499 blr /* for 601, do nothing */
630END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 500END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
631 li r5,L1_CACHE_LINE_SIZE-1 501 li r5,L1_CACHE_BYTES-1
632 andc r3,r3,r5 502 andc r3,r3,r5
633 subf r4,r3,r4 503 subf r4,r3,r4
634 add r4,r4,r5 504 add r4,r4,r5
635 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 505 srwi. r4,r4,L1_CACHE_SHIFT
636 beqlr 506 beqlr
637 mtctr r4 507 mtctr r4
638 mr r6,r3 508 mr r6,r3
6391: dcbst 0,r3 5091: dcbst 0,r3
640 addi r3,r3,L1_CACHE_LINE_SIZE 510 addi r3,r3,L1_CACHE_BYTES
641 bdnz 1b 511 bdnz 1b
642 sync /* wait for dcbst's to get to ram */ 512 sync /* wait for dcbst's to get to ram */
643 mtctr r4 513 mtctr r4
6442: icbi 0,r6 5142: icbi 0,r6
645 addi r6,r6,L1_CACHE_LINE_SIZE 515 addi r6,r6,L1_CACHE_BYTES
646 bdnz 2b 516 bdnz 2b
647 sync /* additional sync needed on g4 */ 517 sync /* additional sync needed on g4 */
648 isync 518 isync
@@ -655,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
655 * clean_dcache_range(unsigned long start, unsigned long stop) 525 * clean_dcache_range(unsigned long start, unsigned long stop)
656 */ 526 */
657_GLOBAL(clean_dcache_range) 527_GLOBAL(clean_dcache_range)
658 li r5,L1_CACHE_LINE_SIZE-1 528 li r5,L1_CACHE_BYTES-1
659 andc r3,r3,r5 529 andc r3,r3,r5
660 subf r4,r3,r4 530 subf r4,r3,r4
661 add r4,r4,r5 531 add r4,r4,r5
662 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 532 srwi. r4,r4,L1_CACHE_SHIFT
663 beqlr 533 beqlr
664 mtctr r4 534 mtctr r4
665 535
6661: dcbst 0,r3 5361: dcbst 0,r3
667 addi r3,r3,L1_CACHE_LINE_SIZE 537 addi r3,r3,L1_CACHE_BYTES
668 bdnz 1b 538 bdnz 1b
669 sync /* wait for dcbst's to get to ram */ 539 sync /* wait for dcbst's to get to ram */
670 blr 540 blr
@@ -676,16 +546,16 @@ _GLOBAL(clean_dcache_range)
676 * flush_dcache_range(unsigned long start, unsigned long stop) 546 * flush_dcache_range(unsigned long start, unsigned long stop)
677 */ 547 */
678_GLOBAL(flush_dcache_range) 548_GLOBAL(flush_dcache_range)
679 li r5,L1_CACHE_LINE_SIZE-1 549 li r5,L1_CACHE_BYTES-1
680 andc r3,r3,r5 550 andc r3,r3,r5
681 subf r4,r3,r4 551 subf r4,r3,r4
682 add r4,r4,r5 552 add r4,r4,r5
683 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 553 srwi. r4,r4,L1_CACHE_SHIFT
684 beqlr 554 beqlr
685 mtctr r4 555 mtctr r4
686 556
6871: dcbf 0,r3 5571: dcbf 0,r3
688 addi r3,r3,L1_CACHE_LINE_SIZE 558 addi r3,r3,L1_CACHE_BYTES
689 bdnz 1b 559 bdnz 1b
690 sync /* wait for dcbst's to get to ram */ 560 sync /* wait for dcbst's to get to ram */
691 blr 561 blr
@@ -698,16 +568,16 @@ _GLOBAL(flush_dcache_range)
698 * invalidate_dcache_range(unsigned long start, unsigned long stop) 568 * invalidate_dcache_range(unsigned long start, unsigned long stop)
699 */ 569 */
700_GLOBAL(invalidate_dcache_range) 570_GLOBAL(invalidate_dcache_range)
701 li r5,L1_CACHE_LINE_SIZE-1 571 li r5,L1_CACHE_BYTES-1
702 andc r3,r3,r5 572 andc r3,r3,r5
703 subf r4,r3,r4 573 subf r4,r3,r4
704 add r4,r4,r5 574 add r4,r4,r5
705 srwi. r4,r4,LG_L1_CACHE_LINE_SIZE 575 srwi. r4,r4,L1_CACHE_SHIFT
706 beqlr 576 beqlr
707 mtctr r4 577 mtctr r4
708 578
7091: dcbi 0,r3 5791: dcbi 0,r3
710 addi r3,r3,L1_CACHE_LINE_SIZE 580 addi r3,r3,L1_CACHE_BYTES
711 bdnz 1b 581 bdnz 1b
712 sync /* wait for dcbi's to get to ram */ 582 sync /* wait for dcbi's to get to ram */
713 blr 583 blr
@@ -728,7 +598,7 @@ _GLOBAL(flush_dcache_all)
728 mtctr r4 598 mtctr r4
729 lis r5, KERNELBASE@h 599 lis r5, KERNELBASE@h
7301: lwz r3, 0(r5) /* Load one word from every line */ 6001: lwz r3, 0(r5) /* Load one word from every line */
731 addi r5, r5, L1_CACHE_LINE_SIZE 601 addi r5, r5, L1_CACHE_BYTES
732 bdnz 1b 602 bdnz 1b
733 blr 603 blr
734#endif /* CONFIG_NOT_COHERENT_CACHE */ 604#endif /* CONFIG_NOT_COHERENT_CACHE */
@@ -746,16 +616,16 @@ BEGIN_FTR_SECTION
746 blr /* for 601, do nothing */ 616 blr /* for 601, do nothing */
747END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) 617END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
748 rlwinm r3,r3,0,0,19 /* Get page base address */ 618 rlwinm r3,r3,0,0,19 /* Get page base address */
749 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 619 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
750 mtctr r4 620 mtctr r4
751 mr r6,r3 621 mr r6,r3
7520: dcbst 0,r3 /* Write line to ram */ 6220: dcbst 0,r3 /* Write line to ram */
753 addi r3,r3,L1_CACHE_LINE_SIZE 623 addi r3,r3,L1_CACHE_BYTES
754 bdnz 0b 624 bdnz 0b
755 sync 625 sync
756 mtctr r4 626 mtctr r4
7571: icbi 0,r6 6271: icbi 0,r6
758 addi r6,r6,L1_CACHE_LINE_SIZE 628 addi r6,r6,L1_CACHE_BYTES
759 bdnz 1b 629 bdnz 1b
760 sync 630 sync
761 isync 631 isync
@@ -778,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
778 mtmsr r0 648 mtmsr r0
779 isync 649 isync
780 rlwinm r3,r3,0,0,19 /* Get page base address */ 650 rlwinm r3,r3,0,0,19 /* Get page base address */
781 li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ 651 li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
782 mtctr r4 652 mtctr r4
783 mr r6,r3 653 mr r6,r3
7840: dcbst 0,r3 /* Write line to ram */ 6540: dcbst 0,r3 /* Write line to ram */
785 addi r3,r3,L1_CACHE_LINE_SIZE 655 addi r3,r3,L1_CACHE_BYTES
786 bdnz 0b 656 bdnz 0b
787 sync 657 sync
788 mtctr r4 658 mtctr r4
7891: icbi 0,r6 6591: icbi 0,r6
790 addi r6,r6,L1_CACHE_LINE_SIZE 660 addi r6,r6,L1_CACHE_BYTES
791 bdnz 1b 661 bdnz 1b
792 sync 662 sync
793 mtmsr r10 /* restore DR */ 663 mtmsr r10 /* restore DR */
@@ -802,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
802 * void clear_pages(void *page, int order) ; 672 * void clear_pages(void *page, int order) ;
803 */ 673 */
804_GLOBAL(clear_pages) 674_GLOBAL(clear_pages)
805 li r0,4096/L1_CACHE_LINE_SIZE 675 li r0,4096/L1_CACHE_BYTES
806 slw r0,r0,r4 676 slw r0,r0,r4
807 mtctr r0 677 mtctr r0
808#ifdef CONFIG_8xx 678#ifdef CONFIG_8xx
@@ -814,7 +684,7 @@ _GLOBAL(clear_pages)
814#else 684#else
8151: dcbz 0,r3 6851: dcbz 0,r3
816#endif 686#endif
817 addi r3,r3,L1_CACHE_LINE_SIZE 687 addi r3,r3,L1_CACHE_BYTES
818 bdnz 1b 688 bdnz 1b
819 blr 689 blr
820 690
@@ -840,7 +710,7 @@ _GLOBAL(copy_page)
840 710
841#ifdef CONFIG_8xx 711#ifdef CONFIG_8xx
842 /* don't use prefetch on 8xx */ 712 /* don't use prefetch on 8xx */
843 li r0,4096/L1_CACHE_LINE_SIZE 713 li r0,4096/L1_CACHE_BYTES
844 mtctr r0 714 mtctr r0
8451: COPY_16_BYTES 7151: COPY_16_BYTES
846 bdnz 1b 716 bdnz 1b
@@ -854,13 +724,13 @@ _GLOBAL(copy_page)
854 li r11,4 724 li r11,4
855 mtctr r0 725 mtctr r0
85611: dcbt r11,r4 72611: dcbt r11,r4
857 addi r11,r11,L1_CACHE_LINE_SIZE 727 addi r11,r11,L1_CACHE_BYTES
858 bdnz 11b 728 bdnz 11b
859#else /* MAX_COPY_PREFETCH == 1 */ 729#else /* MAX_COPY_PREFETCH == 1 */
860 dcbt r5,r4 730 dcbt r5,r4
861 li r11,L1_CACHE_LINE_SIZE+4 731 li r11,L1_CACHE_BYTES+4
862#endif /* MAX_COPY_PREFETCH */ 732#endif /* MAX_COPY_PREFETCH */
863 li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH 733 li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
864 crclr 4*cr0+eq 734 crclr 4*cr0+eq
8652: 7352:
866 mtctr r0 736 mtctr r0
@@ -868,12 +738,12 @@ _GLOBAL(copy_page)
868 dcbt r11,r4 738 dcbt r11,r4
869 dcbz r5,r3 739 dcbz r5,r3
870 COPY_16_BYTES 740 COPY_16_BYTES
871#if L1_CACHE_LINE_SIZE >= 32 741#if L1_CACHE_BYTES >= 32
872 COPY_16_BYTES 742 COPY_16_BYTES
873#if L1_CACHE_LINE_SIZE >= 64 743#if L1_CACHE_BYTES >= 64
874 COPY_16_BYTES 744 COPY_16_BYTES
875 COPY_16_BYTES 745 COPY_16_BYTES
876#if L1_CACHE_LINE_SIZE >= 128 746#if L1_CACHE_BYTES >= 128
877 COPY_16_BYTES 747 COPY_16_BYTES
878 COPY_16_BYTES 748 COPY_16_BYTES
879 COPY_16_BYTES 749 COPY_16_BYTES
@@ -1098,33 +968,6 @@ _GLOBAL(_get_SP)
1098 blr 968 blr
1099 969
1100/* 970/*
1101 * These are used in the alignment trap handler when emulating
1102 * single-precision loads and stores.
1103 * We restore and save the fpscr so the task gets the same result
1104 * and exceptions as if the cpu had performed the load or store.
1105 */
1106
1107#ifdef CONFIG_PPC_FPU
1108_GLOBAL(cvt_fd)
1109 lfd 0,-4(r5) /* load up fpscr value */
1110 mtfsf 0xff,0
1111 lfs 0,0(r3)
1112 stfd 0,0(r4)
1113 mffs 0 /* save new fpscr value */
1114 stfd 0,-4(r5)
1115 blr
1116
1117_GLOBAL(cvt_df)
1118 lfd 0,-4(r5) /* load up fpscr value */
1119 mtfsf 0xff,0
1120 lfd 0,0(r3)
1121 stfs 0,0(r4)
1122 mffs 0 /* save new fpscr value */
1123 stfd 0,-4(r5)
1124 blr
1125#endif
1126
1127/*
1128 * Create a kernel thread 971 * Create a kernel thread
1129 * kernel_thread(fn, arg, flags) 972 * kernel_thread(fn, arg, flags)
1130 */ 973 */
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index 854e45beb387..e8f4e576750a 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -21,6 +21,7 @@
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <asm/machdep.h>
24 25
25#undef DEBUG 26#undef DEBUG
26 27
@@ -53,7 +54,7 @@ static u8* pci_to_OF_bus_map;
53/* By default, we don't re-assign bus numbers. We do this only on 54/* By default, we don't re-assign bus numbers. We do this only on
54 * some pmacs 55 * some pmacs
55 */ 56 */
56int pci_assign_all_busses; 57int pci_assign_all_buses;
57 58
58struct pci_controller* hose_head; 59struct pci_controller* hose_head;
59struct pci_controller** hose_tail = &hose_head; 60struct pci_controller** hose_tail = &hose_head;
@@ -644,7 +645,7 @@ pcibios_alloc_controller(void)
644/* 645/*
645 * Functions below are used on OpenFirmware machines. 646 * Functions below are used on OpenFirmware machines.
646 */ 647 */
647static void __openfirmware 648static void
648make_one_node_map(struct device_node* node, u8 pci_bus) 649make_one_node_map(struct device_node* node, u8 pci_bus)
649{ 650{
650 int *bus_range; 651 int *bus_range;
@@ -678,7 +679,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
678 } 679 }
679} 680}
680 681
681void __openfirmware 682void
682pcibios_make_OF_bus_map(void) 683pcibios_make_OF_bus_map(void)
683{ 684{
684 int i; 685 int i;
@@ -720,7 +721,7 @@ pcibios_make_OF_bus_map(void)
720 721
721typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data); 722typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
722 723
723static struct device_node* __openfirmware 724static struct device_node*
724scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data) 725scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* data)
725{ 726{
726 struct device_node* sub_node; 727 struct device_node* sub_node;
@@ -761,7 +762,7 @@ scan_OF_pci_childs_iterator(struct device_node* node, void* data)
761 return 0; 762 return 0;
762} 763}
763 764
764static struct device_node* __openfirmware 765static struct device_node*
765scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn) 766scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
766{ 767{
767 u8 filter_data[2] = {bus, dev_fn}; 768 u8 filter_data[2] = {bus, dev_fn};
@@ -813,18 +814,20 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
813 /* Now, lookup childs of the hose */ 814 /* Now, lookup childs of the hose */
814 return scan_OF_childs_for_device(node->child, busnr, devfn); 815 return scan_OF_childs_for_device(node->child, busnr, devfn);
815} 816}
817EXPORT_SYMBOL(pci_busdev_to_OF_node);
816 818
817struct device_node* 819struct device_node*
818pci_device_to_OF_node(struct pci_dev *dev) 820pci_device_to_OF_node(struct pci_dev *dev)
819{ 821{
820 return pci_busdev_to_OF_node(dev->bus, dev->devfn); 822 return pci_busdev_to_OF_node(dev->bus, dev->devfn);
821} 823}
824EXPORT_SYMBOL(pci_device_to_OF_node);
822 825
823/* This routine is meant to be used early during boot, when the 826/* This routine is meant to be used early during boot, when the
824 * PCI bus numbers have not yet been assigned, and you need to 827 * PCI bus numbers have not yet been assigned, and you need to
825 * issue PCI config cycles to an OF device. 828 * issue PCI config cycles to an OF device.
826 * It could also be used to "fix" RTAS config cycles if you want 829 * It could also be used to "fix" RTAS config cycles if you want
827 * to set pci_assign_all_busses to 1 and still use RTAS for PCI 830 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
828 * config cycles. 831 * config cycles.
829 */ 832 */
830struct pci_controller* 833struct pci_controller*
@@ -842,7 +845,7 @@ pci_find_hose_for_OF_device(struct device_node* node)
842 return NULL; 845 return NULL;
843} 846}
844 847
845static int __openfirmware 848static int
846find_OF_pci_device_filter(struct device_node* node, void* data) 849find_OF_pci_device_filter(struct device_node* node, void* data)
847{ 850{
848 return ((void *)node == data); 851 return ((void *)node == data);
@@ -890,6 +893,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
890 } 893 }
891 return -ENODEV; 894 return -ENODEV;
892} 895}
896EXPORT_SYMBOL(pci_device_from_OF_node);
893 897
894void __init 898void __init
895pci_process_bridge_OF_ranges(struct pci_controller *hose, 899pci_process_bridge_OF_ranges(struct pci_controller *hose,
@@ -1030,6 +1034,10 @@ static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *att
1030} 1034}
1031static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); 1035static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
1032 1036
1037#else /* CONFIG_PPC_OF */
1038void pcibios_make_OF_bus_map(void)
1039{
1040}
1033#endif /* CONFIG_PPC_OF */ 1041#endif /* CONFIG_PPC_OF */
1034 1042
1035/* Add sysfs properties */ 1043/* Add sysfs properties */
@@ -1262,12 +1270,12 @@ pcibios_init(void)
1262 1270
1263 /* Scan all of the recorded PCI controllers. */ 1271 /* Scan all of the recorded PCI controllers. */
1264 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) { 1272 for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
1265 if (pci_assign_all_busses) 1273 if (pci_assign_all_buses)
1266 hose->first_busno = next_busno; 1274 hose->first_busno = next_busno;
1267 hose->last_busno = 0xff; 1275 hose->last_busno = 0xff;
1268 bus = pci_scan_bus(hose->first_busno, hose->ops, hose); 1276 bus = pci_scan_bus(hose->first_busno, hose->ops, hose);
1269 hose->last_busno = bus->subordinate; 1277 hose->last_busno = bus->subordinate;
1270 if (pci_assign_all_busses || next_busno <= hose->last_busno) 1278 if (pci_assign_all_buses || next_busno <= hose->last_busno)
1271 next_busno = hose->last_busno + pcibios_assign_bus_offset; 1279 next_busno = hose->last_busno + pcibios_assign_bus_offset;
1272 } 1280 }
1273 pci_bus_count = next_busno; 1281 pci_bus_count = next_busno;
@@ -1276,7 +1284,7 @@ pcibios_init(void)
1276 * numbers vs. kernel bus numbers since we may have to 1284 * numbers vs. kernel bus numbers since we may have to
1277 * remap them. 1285 * remap them.
1278 */ 1286 */
1279 if (pci_assign_all_busses && have_of) 1287 if (pci_assign_all_buses && have_of)
1280 pcibios_make_OF_bus_map(); 1288 pcibios_make_OF_bus_map();
1281 1289
1282 /* Do machine dependent PCI interrupt routing */ 1290 /* Do machine dependent PCI interrupt routing */
@@ -1586,16 +1594,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
1586 * above routine 1594 * above routine
1587 */ 1595 */
1588pgprot_t pci_phys_mem_access_prot(struct file *file, 1596pgprot_t pci_phys_mem_access_prot(struct file *file,
1589 unsigned long offset, 1597 unsigned long pfn,
1590 unsigned long size, 1598 unsigned long size,
1591 pgprot_t protection) 1599 pgprot_t protection)
1592{ 1600{
1593 struct pci_dev *pdev = NULL; 1601 struct pci_dev *pdev = NULL;
1594 struct resource *found = NULL; 1602 struct resource *found = NULL;
1595 unsigned long prot = pgprot_val(protection); 1603 unsigned long prot = pgprot_val(protection);
1604 unsigned long offset = pfn << PAGE_SHIFT;
1596 int i; 1605 int i;
1597 1606
1598 if (page_is_ram(offset >> PAGE_SHIFT)) 1607 if (page_is_ram(pfn))
1599 return prot; 1608 return prot;
1600 1609
1601 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 1610 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c
deleted file mode 100644
index 22df9a596a0f..000000000000
--- a/arch/ppc/kernel/perfmon.c
+++ /dev/null
@@ -1,96 +0,0 @@
1/* kernel/perfmon.c
2 * PPC 32 Performance Monitor Infrastructure
3 *
4 * Author: Andy Fleming
5 * Copyright (c) 2004 Freescale Semiconductor, Inc
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
20#include <linux/slab.h>
21#include <linux/user.h>
22#include <linux/a.out.h>
23#include <linux/interrupt.h>
24#include <linux/config.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/prctl.h>
28
29#include <asm/pgtable.h>
30#include <asm/uaccess.h>
31#include <asm/system.h>
32#include <asm/io.h>
33#include <asm/reg.h>
34#include <asm/xmon.h>
35
36/* A lock to regulate grabbing the interrupt */
37DEFINE_SPINLOCK(perfmon_lock);
38
39#if defined (CONFIG_FSL_BOOKE) && !defined (CONFIG_E200)
40static void dummy_perf(struct pt_regs *regs)
41{
42 unsigned int pmgc0 = mfpmr(PMRN_PMGC0);
43
44 pmgc0 &= ~PMGC0_PMIE;
45 mtpmr(PMRN_PMGC0, pmgc0);
46}
47
48#elif defined(CONFIG_6xx)
49/* Ensure exceptions are disabled */
50static void dummy_perf(struct pt_regs *regs)
51{
52 unsigned int mmcr0 = mfspr(SPRN_MMCR0);
53
54 mmcr0 &= ~MMCR0_PMXE;
55 mtspr(SPRN_MMCR0, mmcr0);
56}
57#else
58static void dummy_perf(struct pt_regs *regs)
59{
60}
61#endif
62
63void (*perf_irq)(struct pt_regs *) = dummy_perf;
64
65/* Grab the interrupt, if it's free.
66 * Returns 0 on success, -1 if the interrupt is taken already */
67int request_perfmon_irq(void (*handler)(struct pt_regs *))
68{
69 int err = 0;
70
71 spin_lock(&perfmon_lock);
72
73 if (perf_irq == dummy_perf)
74 perf_irq = handler;
75 else {
76 pr_info("perfmon irq already handled by %p\n", perf_irq);
77 err = -1;
78 }
79
80 spin_unlock(&perfmon_lock);
81
82 return err;
83}
84
85void free_perfmon_irq(void)
86{
87 spin_lock(&perfmon_lock);
88
89 perf_irq = dummy_perf;
90
91 spin_unlock(&perfmon_lock);
92}
93
94EXPORT_SYMBOL(perf_irq);
95EXPORT_SYMBOL(request_perfmon_irq);
96EXPORT_SYMBOL(free_perfmon_irq);
diff --git a/arch/ppc/kernel/perfmon_fsl_booke.c b/arch/ppc/kernel/perfmon_fsl_booke.c
index 03526bfb0840..32455dfcc36b 100644
--- a/arch/ppc/kernel/perfmon_fsl_booke.c
+++ b/arch/ppc/kernel/perfmon_fsl_booke.c
@@ -32,7 +32,7 @@
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/reg.h> 33#include <asm/reg.h>
34#include <asm/xmon.h> 34#include <asm/xmon.h>
35#include <asm/perfmon.h> 35#include <asm/pmc.h>
36 36
37static inline u32 get_pmlca(int ctr); 37static inline u32 get_pmlca(int ctr);
38static inline void set_pmlca(int ctr, u32 pmlca); 38static inline void set_pmlca(int ctr, u32 pmlca);
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 88f6bb7b6964..ae24196d78f6 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -53,10 +53,10 @@
53 53
54extern void transfer_to_handler(void); 54extern void transfer_to_handler(void);
55extern void do_IRQ(struct pt_regs *regs); 55extern void do_IRQ(struct pt_regs *regs);
56extern void MachineCheckException(struct pt_regs *regs); 56extern void machine_check_exception(struct pt_regs *regs);
57extern void AlignmentException(struct pt_regs *regs); 57extern void alignment_exception(struct pt_regs *regs);
58extern void ProgramCheckException(struct pt_regs *regs); 58extern void program_check_exception(struct pt_regs *regs);
59extern void SingleStepException(struct pt_regs *regs); 59extern void single_step_exception(struct pt_regs *regs);
60extern int do_signal(sigset_t *, struct pt_regs *); 60extern int do_signal(sigset_t *, struct pt_regs *);
61extern int pmac_newworld; 61extern int pmac_newworld;
62extern int sys_sigreturn(struct pt_regs *regs); 62extern int sys_sigreturn(struct pt_regs *regs);
@@ -72,10 +72,10 @@ EXPORT_SYMBOL(clear_user_page);
72EXPORT_SYMBOL(do_signal); 72EXPORT_SYMBOL(do_signal);
73EXPORT_SYMBOL(transfer_to_handler); 73EXPORT_SYMBOL(transfer_to_handler);
74EXPORT_SYMBOL(do_IRQ); 74EXPORT_SYMBOL(do_IRQ);
75EXPORT_SYMBOL(MachineCheckException); 75EXPORT_SYMBOL(machine_check_exception);
76EXPORT_SYMBOL(AlignmentException); 76EXPORT_SYMBOL(alignment_exception);
77EXPORT_SYMBOL(ProgramCheckException); 77EXPORT_SYMBOL(program_check_exception);
78EXPORT_SYMBOL(SingleStepException); 78EXPORT_SYMBOL(single_step_exception);
79EXPORT_SYMBOL(sys_sigreturn); 79EXPORT_SYMBOL(sys_sigreturn);
80EXPORT_SYMBOL(ppc_n_lost_interrupts); 80EXPORT_SYMBOL(ppc_n_lost_interrupts);
81EXPORT_SYMBOL(ppc_lost_interrupts); 81EXPORT_SYMBOL(ppc_lost_interrupts);
@@ -230,9 +230,6 @@ EXPORT_SYMBOL(find_all_nodes);
230EXPORT_SYMBOL(get_property); 230EXPORT_SYMBOL(get_property);
231EXPORT_SYMBOL(request_OF_resource); 231EXPORT_SYMBOL(request_OF_resource);
232EXPORT_SYMBOL(release_OF_resource); 232EXPORT_SYMBOL(release_OF_resource);
233EXPORT_SYMBOL(pci_busdev_to_OF_node);
234EXPORT_SYMBOL(pci_device_to_OF_node);
235EXPORT_SYMBOL(pci_device_from_OF_node);
236EXPORT_SYMBOL(of_find_node_by_name); 233EXPORT_SYMBOL(of_find_node_by_name);
237EXPORT_SYMBOL(of_find_node_by_type); 234EXPORT_SYMBOL(of_find_node_by_type);
238EXPORT_SYMBOL(of_find_compatible_node); 235EXPORT_SYMBOL(of_find_compatible_node);
@@ -272,16 +269,6 @@ EXPORT_SYMBOL(screen_info);
272#endif 269#endif
273 270
274EXPORT_SYMBOL(__delay); 271EXPORT_SYMBOL(__delay);
275#ifndef INLINE_IRQS
276EXPORT_SYMBOL(local_irq_enable);
277EXPORT_SYMBOL(local_irq_enable_end);
278EXPORT_SYMBOL(local_irq_disable);
279EXPORT_SYMBOL(local_irq_disable_end);
280EXPORT_SYMBOL(local_save_flags_ptr);
281EXPORT_SYMBOL(local_save_flags_ptr_end);
282EXPORT_SYMBOL(local_irq_restore);
283EXPORT_SYMBOL(local_irq_restore_end);
284#endif
285EXPORT_SYMBOL(timer_interrupt); 272EXPORT_SYMBOL(timer_interrupt);
286EXPORT_SYMBOL(irq_desc); 273EXPORT_SYMBOL(irq_desc);
287EXPORT_SYMBOL(tb_ticks_per_jiffy); 274EXPORT_SYMBOL(tb_ticks_per_jiffy);
@@ -335,11 +322,6 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
335extern long *intercept_table; 322extern long *intercept_table;
336EXPORT_SYMBOL(intercept_table); 323EXPORT_SYMBOL(intercept_table);
337#endif /* CONFIG_PPC_STD_MMU */ 324#endif /* CONFIG_PPC_STD_MMU */
338EXPORT_SYMBOL(cur_cpu_spec);
339#ifdef CONFIG_PPC_PMAC
340extern unsigned long agp_special_page;
341EXPORT_SYMBOL(agp_special_page);
342#endif
343#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) 325#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
344EXPORT_SYMBOL(__mtdcr); 326EXPORT_SYMBOL(__mtdcr);
345EXPORT_SYMBOL(__mfdcr); 327EXPORT_SYMBOL(__mfdcr);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index 82de66e4db6d..cb1c7b92f8c6 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
152} 152}
153#endif /* defined(CHECK_STACK) */ 153#endif /* defined(CHECK_STACK) */
154 154
155#ifdef CONFIG_ALTIVEC 155/*
156int 156 * Make sure the floating-point register state in the
157dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs) 157 * the thread_struct is up to date for task tsk.
158 */
159void flush_fp_to_thread(struct task_struct *tsk)
158{ 160{
159 if (regs->msr & MSR_VEC) 161 if (tsk->thread.regs) {
160 giveup_altivec(current); 162 /*
161 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs)); 163 * We need to disable preemption here because if we didn't,
164 * another process could get scheduled after the regs->msr
165 * test but before we have finished saving the FP registers
166 * to the thread_struct. That process could take over the
167 * FPU, and then when we get scheduled again we would store
168 * bogus values for the remaining FP registers.
169 */
170 preempt_disable();
171 if (tsk->thread.regs->msr & MSR_FP) {
172#ifdef CONFIG_SMP
173 /*
174 * This should only ever be called for current or
175 * for a stopped child process. Since we save away
176 * the FP register state on context switch on SMP,
177 * there is something wrong if a stopped child appears
178 * to still have its FP state in the CPU registers.
179 */
180 BUG_ON(tsk != current);
181#endif
182 giveup_fpu(current);
183 }
184 preempt_enable();
185 }
186}
187
188void enable_kernel_fp(void)
189{
190 WARN_ON(preemptible());
191
192#ifdef CONFIG_SMP
193 if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
194 giveup_fpu(current);
195 else
196 giveup_fpu(NULL); /* just enables FP for kernel */
197#else
198 giveup_fpu(last_task_used_math);
199#endif /* CONFIG_SMP */
200}
201EXPORT_SYMBOL(enable_kernel_fp);
202
203int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
204{
205 preempt_disable();
206 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
207 giveup_fpu(tsk);
208 preempt_enable();
209 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
162 return 1; 210 return 1;
163} 211}
164 212
165void 213#ifdef CONFIG_ALTIVEC
166enable_kernel_altivec(void) 214void enable_kernel_altivec(void)
167{ 215{
168 WARN_ON(preemptible()); 216 WARN_ON(preemptible());
169 217
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
177#endif /* __SMP __ */ 225#endif /* __SMP __ */
178} 226}
179EXPORT_SYMBOL(enable_kernel_altivec); 227EXPORT_SYMBOL(enable_kernel_altivec);
180#endif /* CONFIG_ALTIVEC */
181 228
182#ifdef CONFIG_SPE 229/*
183int 230 * Make sure the VMX/Altivec register state in the
184dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs) 231 * the thread_struct is up to date for task tsk.
232 */
233void flush_altivec_to_thread(struct task_struct *tsk)
185{ 234{
186 if (regs->msr & MSR_SPE) 235 if (tsk->thread.regs) {
187 giveup_spe(current); 236 preempt_disable();
188 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */ 237 if (tsk->thread.regs->msr & MSR_VEC) {
189 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35); 238#ifdef CONFIG_SMP
239 BUG_ON(tsk != current);
240#endif
241 giveup_altivec(current);
242 }
243 preempt_enable();
244 }
245}
246
247int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
248{
249 if (regs->msr & MSR_VEC)
250 giveup_altivec(current);
251 memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
190 return 1; 252 return 1;
191} 253}
254#endif /* CONFIG_ALTIVEC */
192 255
256#ifdef CONFIG_SPE
193void 257void
194enable_kernel_spe(void) 258enable_kernel_spe(void)
195{ 259{
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
205#endif /* __SMP __ */ 269#endif /* __SMP __ */
206} 270}
207EXPORT_SYMBOL(enable_kernel_spe); 271EXPORT_SYMBOL(enable_kernel_spe);
208#endif /* CONFIG_SPE */
209 272
210void 273void flush_spe_to_thread(struct task_struct *tsk)
211enable_kernel_fp(void)
212{ 274{
213 WARN_ON(preemptible()); 275 if (tsk->thread.regs) {
214 276 preempt_disable();
277 if (tsk->thread.regs->msr & MSR_SPE) {
215#ifdef CONFIG_SMP 278#ifdef CONFIG_SMP
216 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) 279 BUG_ON(tsk != current);
217 giveup_fpu(current); 280#endif
218 else 281 giveup_spe(current);
219 giveup_fpu(NULL); /* just enables FP for kernel */ 282 }
220#else 283 preempt_enable();
221 giveup_fpu(last_task_used_math); 284 }
222#endif /* CONFIG_SMP */
223} 285}
224EXPORT_SYMBOL(enable_kernel_fp);
225 286
226int 287int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
227dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
228{ 288{
229 preempt_disable(); 289 if (regs->msr & MSR_SPE)
230 if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP)) 290 giveup_spe(current);
231 giveup_fpu(tsk); 291 /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
232 preempt_enable(); 292 memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
233 memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
234 return 1; 293 return 1;
235} 294}
295#endif /* CONFIG_SPE */
236 296
237struct task_struct *__switch_to(struct task_struct *prev, 297struct task_struct *__switch_to(struct task_struct *prev,
238 struct task_struct *new) 298 struct task_struct *new)
@@ -287,11 +347,13 @@ struct task_struct *__switch_to(struct task_struct *prev,
287#endif /* CONFIG_SPE */ 347#endif /* CONFIG_SPE */
288#endif /* CONFIG_SMP */ 348#endif /* CONFIG_SMP */
289 349
350#ifdef CONFIG_ALTIVEC
290 /* Avoid the trap. On smp this this never happens since 351 /* Avoid the trap. On smp this this never happens since
291 * we don't set last_task_used_altivec -- Cort 352 * we don't set last_task_used_altivec -- Cort
292 */ 353 */
293 if (new->thread.regs && last_task_used_altivec == new) 354 if (new->thread.regs && last_task_used_altivec == new)
294 new->thread.regs->msr |= MSR_VEC; 355 new->thread.regs->msr |= MSR_VEC;
356#endif
295#ifdef CONFIG_SPE 357#ifdef CONFIG_SPE
296 /* Avoid the trap. On smp this this never happens since 358 /* Avoid the trap. On smp this this never happens since
297 * we don't set last_task_used_spe 359 * we don't set last_task_used_spe
@@ -482,7 +544,7 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
482 last_task_used_spe = NULL; 544 last_task_used_spe = NULL;
483#endif 545#endif
484 memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); 546 memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
485 current->thread.fpscr = 0; 547 current->thread.fpscr.val = 0;
486#ifdef CONFIG_ALTIVEC 548#ifdef CONFIG_ALTIVEC
487 memset(current->thread.vr, 0, sizeof(current->thread.vr)); 549 memset(current->thread.vr, 0, sizeof(current->thread.vr));
488 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); 550 memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
@@ -557,14 +619,16 @@ int sys_clone(unsigned long clone_flags, unsigned long usp,
557 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); 619 return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
558} 620}
559 621
560int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6, 622int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
623 unsigned long p4, unsigned long p5, unsigned long p6,
561 struct pt_regs *regs) 624 struct pt_regs *regs)
562{ 625{
563 CHECK_FULL_REGS(regs); 626 CHECK_FULL_REGS(regs);
564 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); 627 return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
565} 628}
566 629
567int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6, 630int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
631 unsigned long p4, unsigned long p5, unsigned long p6,
568 struct pt_regs *regs) 632 struct pt_regs *regs)
569{ 633{
570 CHECK_FULL_REGS(regs); 634 CHECK_FULL_REGS(regs);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 545cfd0fab59..6bcb85d2b7fd 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -71,7 +71,8 @@ struct ide_machdep_calls ppc_ide_md;
71unsigned long boot_mem_size; 71unsigned long boot_mem_size;
72 72
73unsigned long ISA_DMA_THRESHOLD; 73unsigned long ISA_DMA_THRESHOLD;
74unsigned long DMA_MODE_READ, DMA_MODE_WRITE; 74unsigned int DMA_MODE_READ;
75unsigned int DMA_MODE_WRITE;
75 76
76#ifdef CONFIG_PPC_MULTIPLATFORM 77#ifdef CONFIG_PPC_MULTIPLATFORM
77int _machine = 0; 78int _machine = 0;
@@ -82,8 +83,18 @@ extern void pmac_init(unsigned long r3, unsigned long r4,
82 unsigned long r5, unsigned long r6, unsigned long r7); 83 unsigned long r5, unsigned long r6, unsigned long r7);
83extern void chrp_init(unsigned long r3, unsigned long r4, 84extern void chrp_init(unsigned long r3, unsigned long r4,
84 unsigned long r5, unsigned long r6, unsigned long r7); 85 unsigned long r5, unsigned long r6, unsigned long r7);
86
87dev_t boot_dev;
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 88#endif /* CONFIG_PPC_MULTIPLATFORM */
86 89
90int have_of;
91EXPORT_SYMBOL(have_of);
92
93#ifdef __DO_IRQ_CANON
94int ppc_do_canonicalize_irqs;
95EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
96#endif
97
87#ifdef CONFIG_MAGIC_SYSRQ 98#ifdef CONFIG_MAGIC_SYSRQ
88unsigned long SYSRQ_KEY = 0x54; 99unsigned long SYSRQ_KEY = 0x54;
89#endif /* CONFIG_MAGIC_SYSRQ */ 100#endif /* CONFIG_MAGIC_SYSRQ */
@@ -185,18 +196,18 @@ int show_cpuinfo(struct seq_file *m, void *v)
185 seq_printf(m, "processor\t: %d\n", i); 196 seq_printf(m, "processor\t: %d\n", i);
186 seq_printf(m, "cpu\t\t: "); 197 seq_printf(m, "cpu\t\t: ");
187 198
188 if (cur_cpu_spec[i]->pvr_mask) 199 if (cur_cpu_spec->pvr_mask)
189 seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name); 200 seq_printf(m, "%s", cur_cpu_spec->cpu_name);
190 else 201 else
191 seq_printf(m, "unknown (%08x)", pvr); 202 seq_printf(m, "unknown (%08x)", pvr);
192#ifdef CONFIG_ALTIVEC 203#ifdef CONFIG_ALTIVEC
193 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC) 204 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
194 seq_printf(m, ", altivec supported"); 205 seq_printf(m, ", altivec supported");
195#endif 206#endif
196 seq_printf(m, "\n"); 207 seq_printf(m, "\n");
197 208
198#ifdef CONFIG_TAU 209#ifdef CONFIG_TAU
199 if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) { 210 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
200#ifdef CONFIG_TAU_AVERAGE 211#ifdef CONFIG_TAU_AVERAGE
201 /* more straightforward, but potentially misleading */ 212 /* more straightforward, but potentially misleading */
202 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 213 seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
@@ -339,7 +350,7 @@ early_init(int r3, int r4, int r5)
339 * Assume here that all clock rates are the same in a 350 * Assume here that all clock rates are the same in a
340 * smp system. -- Cort 351 * smp system. -- Cort
341 */ 352 */
342int __openfirmware 353int
343of_show_percpuinfo(struct seq_file *m, int i) 354of_show_percpuinfo(struct seq_file *m, int i)
344{ 355{
345 struct device_node *cpu_node; 356 struct device_node *cpu_node;
@@ -404,11 +415,15 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
404 _machine = _MACH_prep; 415 _machine = _MACH_prep;
405 } 416 }
406 417
418#ifdef CONFIG_PPC_PREP
407 /* not much more to do here, if prep */ 419 /* not much more to do here, if prep */
408 if (_machine == _MACH_prep) { 420 if (_machine == _MACH_prep) {
409 prep_init(r3, r4, r5, r6, r7); 421 prep_init(r3, r4, r5, r6, r7);
410 return; 422 return;
411 } 423 }
424#endif
425
426 have_of = 1;
412 427
413 /* prom_init has already been called from __start */ 428 /* prom_init has already been called from __start */
414 if (boot_infos) 429 if (boot_infos)
@@ -479,12 +494,16 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
479#endif /* CONFIG_ADB */ 494#endif /* CONFIG_ADB */
480 495
481 switch (_machine) { 496 switch (_machine) {
497#ifdef CONFIG_PPC_PMAC
482 case _MACH_Pmac: 498 case _MACH_Pmac:
483 pmac_init(r3, r4, r5, r6, r7); 499 pmac_init(r3, r4, r5, r6, r7);
484 break; 500 break;
501#endif
502#ifdef CONFIG_PPC_CHRP
485 case _MACH_chrp: 503 case _MACH_chrp:
486 chrp_init(r3, r4, r5, r6, r7); 504 chrp_init(r3, r4, r5, r6, r7);
487 break; 505 break;
506#endif
488 } 507 }
489} 508}
490 509
@@ -721,7 +740,7 @@ void __init setup_arch(char **cmdline_p)
721#endif 740#endif
722 741
723#ifdef CONFIG_XMON 742#ifdef CONFIG_XMON
724 xmon_map_scc(); 743 xmon_init(1);
725 if (strstr(cmd_line, "xmon")) 744 if (strstr(cmd_line, "xmon"))
726 xmon(NULL); 745 xmon(NULL);
727#endif /* CONFIG_XMON */ 746#endif /* CONFIG_XMON */
@@ -745,12 +764,12 @@ void __init setup_arch(char **cmdline_p)
745 * for a possibly more accurate value. 764 * for a possibly more accurate value.
746 */ 765 */
747 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) { 766 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
748 dcache_bsize = cur_cpu_spec[0]->dcache_bsize; 767 dcache_bsize = cur_cpu_spec->dcache_bsize;
749 icache_bsize = cur_cpu_spec[0]->icache_bsize; 768 icache_bsize = cur_cpu_spec->icache_bsize;
750 ucache_bsize = 0; 769 ucache_bsize = 0;
751 } else 770 } else
752 ucache_bsize = dcache_bsize = icache_bsize 771 ucache_bsize = dcache_bsize = icache_bsize
753 = cur_cpu_spec[0]->dcache_bsize; 772 = cur_cpu_spec->dcache_bsize;
754 773
755 /* reboot on panic */ 774 /* reboot on panic */
756 panic_timeout = 180; 775 panic_timeout = 180;
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
deleted file mode 100644
index 2244bf91e593..000000000000
--- a/arch/ppc/kernel/signal.c
+++ /dev/null
@@ -1,771 +0,0 @@
1/*
2 * arch/ppc/kernel/signal.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/signal.c"
8 * Copyright (C) 1991, 1992 Linus Torvalds
9 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/sched.h>
18#include <linux/mm.h>
19#include <linux/smp.h>
20#include <linux/smp_lock.h>
21#include <linux/kernel.h>
22#include <linux/signal.h>
23#include <linux/errno.h>
24#include <linux/wait.h>
25#include <linux/ptrace.h>
26#include <linux/unistd.h>
27#include <linux/stddef.h>
28#include <linux/elf.h>
29#include <linux/tty.h>
30#include <linux/binfmts.h>
31#include <linux/suspend.h>
32#include <asm/ucontext.h>
33#include <asm/uaccess.h>
34#include <asm/pgtable.h>
35#include <asm/cacheflush.h>
36
37#undef DEBUG_SIG
38
39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
40
41extern void sigreturn_exit(struct pt_regs *);
42
43#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
44
45int do_signal(sigset_t *oldset, struct pt_regs *regs);
46
47/*
48 * Atomically swap in the new signal mask, and wait for a signal.
49 */
50int
51sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7,
52 struct pt_regs *regs)
53{
54 sigset_t saveset;
55
56 mask &= _BLOCKABLE;
57 spin_lock_irq(&current->sighand->siglock);
58 saveset = current->blocked;
59 siginitset(&current->blocked, mask);
60 recalc_sigpending();
61 spin_unlock_irq(&current->sighand->siglock);
62
63 regs->result = -EINTR;
64 regs->gpr[3] = EINTR;
65 regs->ccr |= 0x10000000;
66 while (1) {
67 current->state = TASK_INTERRUPTIBLE;
68 schedule();
69 if (do_signal(&saveset, regs))
70 sigreturn_exit(regs);
71 }
72}
73
74int
75sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int p4,
76 int p6, int p7, struct pt_regs *regs)
77{
78 sigset_t saveset, newset;
79
80 /* XXX: Don't preclude handling different sized sigset_t's. */
81 if (sigsetsize != sizeof(sigset_t))
82 return -EINVAL;
83
84 if (copy_from_user(&newset, unewset, sizeof(newset)))
85 return -EFAULT;
86 sigdelsetmask(&newset, ~_BLOCKABLE);
87
88 spin_lock_irq(&current->sighand->siglock);
89 saveset = current->blocked;
90 current->blocked = newset;
91 recalc_sigpending();
92 spin_unlock_irq(&current->sighand->siglock);
93
94 regs->result = -EINTR;
95 regs->gpr[3] = EINTR;
96 regs->ccr |= 0x10000000;
97 while (1) {
98 current->state = TASK_INTERRUPTIBLE;
99 schedule();
100 if (do_signal(&saveset, regs))
101 sigreturn_exit(regs);
102 }
103}
104
105
106int
107sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, int r5,
108 int r6, int r7, int r8, struct pt_regs *regs)
109{
110 return do_sigaltstack(uss, uoss, regs->gpr[1]);
111}
112
113int
114sys_sigaction(int sig, const struct old_sigaction __user *act,
115 struct old_sigaction __user *oact)
116{
117 struct k_sigaction new_ka, old_ka;
118 int ret;
119
120 if (act) {
121 old_sigset_t mask;
122 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
123 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
124 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
125 return -EFAULT;
126 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
127 __get_user(mask, &act->sa_mask);
128 siginitset(&new_ka.sa.sa_mask, mask);
129 }
130
131 ret = do_sigaction(sig, (act? &new_ka: NULL), (oact? &old_ka: NULL));
132
133 if (!ret && oact) {
134 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
135 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
136 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
137 return -EFAULT;
138 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
139 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
140 }
141
142 return ret;
143}
144
145/*
146 * When we have signals to deliver, we set up on the
147 * user stack, going down from the original stack pointer:
148 * a sigregs struct
149 * a sigcontext struct
150 * a gap of __SIGNAL_FRAMESIZE bytes
151 *
152 * Each of these things must be a multiple of 16 bytes in size.
153 *
154 */
155struct sigregs {
156 struct mcontext mctx; /* all the register values */
157 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
158 and 18 fp regs below sp before decrementing it. */
159 int abigap[56];
160};
161
162/* We use the mc_pad field for the signal return trampoline. */
163#define tramp mc_pad
164
165/*
166 * When we have rt signals to deliver, we set up on the
167 * user stack, going down from the original stack pointer:
168 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
169 * a gap of __SIGNAL_FRAMESIZE+16 bytes
170 * (the +16 is to get the siginfo and ucontext in the same
171 * positions as in older kernels).
172 *
173 * Each of these things must be a multiple of 16 bytes in size.
174 *
175 */
176struct rt_sigframe
177{
178 struct siginfo info;
179 struct ucontext uc;
180 /* Programs using the rs6000/xcoff abi can save up to 19 gp regs
181 and 18 fp regs below sp before decrementing it. */
182 int abigap[56];
183};
184
185/*
186 * Save the current user registers on the user stack.
187 * We only save the altivec/spe registers if the process has used
188 * altivec/spe instructions at some point.
189 */
190static int
191save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, int sigret)
192{
193 /* save general and floating-point registers */
194 CHECK_FULL_REGS(regs);
195 preempt_disable();
196 if (regs->msr & MSR_FP)
197 giveup_fpu(current);
198#ifdef CONFIG_ALTIVEC
199 if (current->thread.used_vr && (regs->msr & MSR_VEC))
200 giveup_altivec(current);
201#endif /* CONFIG_ALTIVEC */
202#ifdef CONFIG_SPE
203 if (current->thread.used_spe && (regs->msr & MSR_SPE))
204 giveup_spe(current);
205#endif /* CONFIG_ALTIVEC */
206 preempt_enable();
207
208 if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE)
209 || __copy_to_user(&frame->mc_fregs, current->thread.fpr,
210 ELF_NFPREG * sizeof(double)))
211 return 1;
212
213 current->thread.fpscr = 0; /* turn off all fp exceptions */
214
215#ifdef CONFIG_ALTIVEC
216 /* save altivec registers */
217 if (current->thread.used_vr) {
218 if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
219 ELF_NVRREG * sizeof(vector128)))
220 return 1;
221 /* set MSR_VEC in the saved MSR value to indicate that
222 frame->mc_vregs contains valid data */
223 if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR]))
224 return 1;
225 }
226 /* else assert((regs->msr & MSR_VEC) == 0) */
227
228 /* We always copy to/from vrsave, it's 0 if we don't have or don't
229 * use altivec. Since VSCR only contains 32 bits saved in the least
230 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
231 * most significant bits of that same vector. --BenH
232 */
233 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
234 return 1;
235#endif /* CONFIG_ALTIVEC */
236
237#ifdef CONFIG_SPE
238 /* save spe registers */
239 if (current->thread.used_spe) {
240 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
241 ELF_NEVRREG * sizeof(u32)))
242 return 1;
243 /* set MSR_SPE in the saved MSR value to indicate that
244 frame->mc_vregs contains valid data */
245 if (__put_user(regs->msr | MSR_SPE, &frame->mc_gregs[PT_MSR]))
246 return 1;
247 }
248 /* else assert((regs->msr & MSR_SPE) == 0) */
249
250 /* We always copy to/from spefscr */
251 if (__put_user(current->thread.spefscr, (u32 *)&frame->mc_vregs + ELF_NEVRREG))
252 return 1;
253#endif /* CONFIG_SPE */
254
255 if (sigret) {
256 /* Set up the sigreturn trampoline: li r0,sigret; sc */
257 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
258 || __put_user(0x44000002UL, &frame->tramp[1]))
259 return 1;
260 flush_icache_range((unsigned long) &frame->tramp[0],
261 (unsigned long) &frame->tramp[2]);
262 }
263
264 return 0;
265}
266
267/*
268 * Restore the current user register values from the user stack,
269 * (except for MSR).
270 */
271static int
272restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
273{
274 unsigned long save_r2 = 0;
275#if defined(CONFIG_ALTIVEC) || defined(CONFIG_SPE)
276 unsigned long msr;
277#endif
278
279 /* backup/restore the TLS as we don't want it to be modified */
280 if (!sig)
281 save_r2 = regs->gpr[2];
282 /* copy up to but not including MSR */
283 if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
284 return 1;
285 /* copy from orig_r3 (the word after the MSR) up to the end */
286 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
287 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
288 return 1;
289 if (!sig)
290 regs->gpr[2] = save_r2;
291
292 /* force the process to reload the FP registers from
293 current->thread when it next does FP instructions */
294 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
295 if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
296 sizeof(sr->mc_fregs)))
297 return 1;
298
299#ifdef CONFIG_ALTIVEC
300 /* force the process to reload the altivec registers from
301 current->thread when it next does altivec instructions */
302 regs->msr &= ~MSR_VEC;
303 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_VEC) != 0) {
304 /* restore altivec registers from the stack */
305 if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
306 sizeof(sr->mc_vregs)))
307 return 1;
308 } else if (current->thread.used_vr)
309 memset(&current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
310
311 /* Always get VRSAVE back */
312 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
313 return 1;
314#endif /* CONFIG_ALTIVEC */
315
316#ifdef CONFIG_SPE
317 /* force the process to reload the spe registers from
318 current->thread when it next does spe instructions */
319 regs->msr &= ~MSR_SPE;
320 if (!__get_user(msr, &sr->mc_gregs[PT_MSR]) && (msr & MSR_SPE) != 0) {
321 /* restore spe registers from the stack */
322 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
323 ELF_NEVRREG * sizeof(u32)))
324 return 1;
325 } else if (current->thread.used_spe)
326 memset(&current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
327
328 /* Always get SPEFSCR back */
329 if (__get_user(current->thread.spefscr, (u32 *)&sr->mc_vregs + ELF_NEVRREG))
330 return 1;
331#endif /* CONFIG_SPE */
332
333#ifndef CONFIG_SMP
334 preempt_disable();
335 if (last_task_used_math == current)
336 last_task_used_math = NULL;
337 if (last_task_used_altivec == current)
338 last_task_used_altivec = NULL;
339 if (last_task_used_spe == current)
340 last_task_used_spe = NULL;
341 preempt_enable();
342#endif
343 return 0;
344}
345
346/*
347 * Restore the user process's signal mask
348 */
349static void
350restore_sigmask(sigset_t *set)
351{
352 sigdelsetmask(set, ~_BLOCKABLE);
353 spin_lock_irq(&current->sighand->siglock);
354 current->blocked = *set;
355 recalc_sigpending();
356 spin_unlock_irq(&current->sighand->siglock);
357}
358
359/*
360 * Set up a signal frame for a "real-time" signal handler
361 * (one which gets siginfo).
362 */
363static void
364handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
365 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
366 unsigned long newsp)
367{
368 struct rt_sigframe __user *rt_sf;
369 struct mcontext __user *frame;
370 unsigned long origsp = newsp;
371
372 /* Set up Signal Frame */
373 /* Put a Real Time Context onto stack */
374 newsp -= sizeof(*rt_sf);
375 rt_sf = (struct rt_sigframe __user *) newsp;
376
377 /* create a stack frame for the caller of the handler */
378 newsp -= __SIGNAL_FRAMESIZE + 16;
379
380 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
381 goto badframe;
382
383 /* Put the siginfo & fill in most of the ucontext */
384 if (copy_siginfo_to_user(&rt_sf->info, info)
385 || __put_user(0, &rt_sf->uc.uc_flags)
386 || __put_user(0, &rt_sf->uc.uc_link)
387 || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
388 || __put_user(sas_ss_flags(regs->gpr[1]),
389 &rt_sf->uc.uc_stack.ss_flags)
390 || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
391 || __put_user(&rt_sf->uc.uc_mcontext, &rt_sf->uc.uc_regs)
392 || __copy_to_user(&rt_sf->uc.uc_sigmask, oldset, sizeof(*oldset)))
393 goto badframe;
394
395 /* Save user registers on the stack */
396 frame = &rt_sf->uc.uc_mcontext;
397 if (save_user_regs(regs, frame, __NR_rt_sigreturn))
398 goto badframe;
399
400 if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
401 goto badframe;
402 regs->gpr[1] = newsp;
403 regs->gpr[3] = sig;
404 regs->gpr[4] = (unsigned long) &rt_sf->info;
405 regs->gpr[5] = (unsigned long) &rt_sf->uc;
406 regs->gpr[6] = (unsigned long) rt_sf;
407 regs->nip = (unsigned long) ka->sa.sa_handler;
408 regs->link = (unsigned long) frame->tramp;
409 regs->trap = 0;
410
411 return;
412
413badframe:
414#ifdef DEBUG_SIG
415 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
416 regs, frame, newsp);
417#endif
418 force_sigsegv(sig, current);
419}
420
421static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
422{
423 sigset_t set;
424 struct mcontext __user *mcp;
425
426 if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(set))
427 || __get_user(mcp, &ucp->uc_regs))
428 return -EFAULT;
429 restore_sigmask(&set);
430 if (restore_user_regs(regs, mcp, sig))
431 return -EFAULT;
432
433 return 0;
434}
435
436int sys_swapcontext(struct ucontext __user *old_ctx,
437 struct ucontext __user *new_ctx,
438 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
439{
440 unsigned char tmp;
441
442 /* Context size is for future use. Right now, we only make sure
443 * we are passed something we understand
444 */
445 if (ctx_size < sizeof(struct ucontext))
446 return -EINVAL;
447
448 if (old_ctx != NULL) {
449 if (!access_ok(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
450 || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
451 || __copy_to_user(&old_ctx->uc_sigmask,
452 &current->blocked, sizeof(sigset_t))
453 || __put_user(&old_ctx->uc_mcontext, &old_ctx->uc_regs))
454 return -EFAULT;
455 }
456 if (new_ctx == NULL)
457 return 0;
458 if (!access_ok(VERIFY_READ, new_ctx, sizeof(*new_ctx))
459 || __get_user(tmp, (u8 __user *) new_ctx)
460 || __get_user(tmp, (u8 __user *) (new_ctx + 1) - 1))
461 return -EFAULT;
462
463 /*
464 * If we get a fault copying the context into the kernel's
465 * image of the user's registers, we can't just return -EFAULT
466 * because the user's registers will be corrupted. For instance
467 * the NIP value may have been updated but not some of the
468 * other registers. Given that we have done the access_ok
469 * and successfully read the first and last bytes of the region
470 * above, this should only happen in an out-of-memory situation
471 * or if another thread unmaps the region containing the context.
472 * We kill the task with a SIGSEGV in this situation.
473 */
474 if (do_setcontext(new_ctx, regs, 0))
475 do_exit(SIGSEGV);
476 sigreturn_exit(regs);
477 /* doesn't actually return back to here */
478 return 0;
479}
480
481int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
482 struct pt_regs *regs)
483{
484 struct rt_sigframe __user *rt_sf;
485
486 /* Always make any pending restarted system calls return -EINTR */
487 current_thread_info()->restart_block.fn = do_no_restart_syscall;
488
489 rt_sf = (struct rt_sigframe __user *)
490 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
491 if (!access_ok(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
492 goto bad;
493 if (do_setcontext(&rt_sf->uc, regs, 1))
494 goto bad;
495
496 /*
497 * It's not clear whether or why it is desirable to save the
498 * sigaltstack setting on signal delivery and restore it on
499 * signal return. But other architectures do this and we have
500 * always done it up until now so it is probably better not to
501 * change it. -- paulus
502 */
503 do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
504
505 sigreturn_exit(regs); /* doesn't return here */
506 return 0;
507
508 bad:
509 force_sig(SIGSEGV, current);
510 return 0;
511}
512
513int sys_debug_setcontext(struct ucontext __user *ctx,
514 int ndbg, struct sig_dbg_op __user *dbg,
515 int r6, int r7, int r8,
516 struct pt_regs *regs)
517{
518 struct sig_dbg_op op;
519 int i;
520 unsigned long new_msr = regs->msr;
521#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
522 unsigned long new_dbcr0 = current->thread.dbcr0;
523#endif
524
525 for (i=0; i<ndbg; i++) {
526 if (__copy_from_user(&op, dbg, sizeof(op)))
527 return -EFAULT;
528 switch (op.dbg_type) {
529 case SIG_DBG_SINGLE_STEPPING:
530#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
531 if (op.dbg_value) {
532 new_msr |= MSR_DE;
533 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
534 } else {
535 new_msr &= ~MSR_DE;
536 new_dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
537 }
538#else
539 if (op.dbg_value)
540 new_msr |= MSR_SE;
541 else
542 new_msr &= ~MSR_SE;
543#endif
544 break;
545 case SIG_DBG_BRANCH_TRACING:
546#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
547 return -EINVAL;
548#else
549 if (op.dbg_value)
550 new_msr |= MSR_BE;
551 else
552 new_msr &= ~MSR_BE;
553#endif
554 break;
555
556 default:
557 return -EINVAL;
558 }
559 }
560
561 /* We wait until here to actually install the values in the
562 registers so if we fail in the above loop, it will not
563 affect the contents of these registers. After this point,
564 failure is a problem, anyway, and it's very unlikely unless
565 the user is really doing something wrong. */
566 regs->msr = new_msr;
567#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
568 current->thread.dbcr0 = new_dbcr0;
569#endif
570
571 /*
572 * If we get a fault copying the context into the kernel's
573 * image of the user's registers, we can't just return -EFAULT
574 * because the user's registers will be corrupted. For instance
575 * the NIP value may have been updated but not some of the
576 * other registers. Given that we have done the access_ok
577 * and successfully read the first and last bytes of the region
578 * above, this should only happen in an out-of-memory situation
579 * or if another thread unmaps the region containing the context.
580 * We kill the task with a SIGSEGV in this situation.
581 */
582 if (do_setcontext(ctx, regs, 1)) {
583 force_sig(SIGSEGV, current);
584 goto out;
585 }
586
587 /*
588 * It's not clear whether or why it is desirable to save the
589 * sigaltstack setting on signal delivery and restore it on
590 * signal return. But other architectures do this and we have
591 * always done it up until now so it is probably better not to
592 * change it. -- paulus
593 */
594 do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
595
596 sigreturn_exit(regs);
597 /* doesn't actually return back to here */
598
599 out:
600 return 0;
601}
602
603/*
604 * OK, we're invoking a handler
605 */
606static void
607handle_signal(unsigned long sig, struct k_sigaction *ka,
608 siginfo_t *info, sigset_t *oldset, struct pt_regs * regs,
609 unsigned long newsp)
610{
611 struct sigcontext __user *sc;
612 struct sigregs __user *frame;
613 unsigned long origsp = newsp;
614
615 /* Set up Signal Frame */
616 newsp -= sizeof(struct sigregs);
617 frame = (struct sigregs __user *) newsp;
618
619 /* Put a sigcontext on the stack */
620 newsp -= sizeof(*sc);
621 sc = (struct sigcontext __user *) newsp;
622
623 /* create a stack frame for the caller of the handler */
624 newsp -= __SIGNAL_FRAMESIZE;
625
626 if (!access_ok(VERIFY_WRITE, (void __user *) newsp, origsp - newsp))
627 goto badframe;
628
629#if _NSIG != 64
630#error "Please adjust handle_signal()"
631#endif
632 if (__put_user((unsigned long) ka->sa.sa_handler, &sc->handler)
633 || __put_user(oldset->sig[0], &sc->oldmask)
634 || __put_user(oldset->sig[1], &sc->_unused[3])
635 || __put_user((struct pt_regs __user *)frame, &sc->regs)
636 || __put_user(sig, &sc->signal))
637 goto badframe;
638
639 if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
640 goto badframe;
641
642 if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
643 goto badframe;
644 regs->gpr[1] = newsp;
645 regs->gpr[3] = sig;
646 regs->gpr[4] = (unsigned long) sc;
647 regs->nip = (unsigned long) ka->sa.sa_handler;
648 regs->link = (unsigned long) frame->mctx.tramp;
649 regs->trap = 0;
650
651 return;
652
653badframe:
654#ifdef DEBUG_SIG
655 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
656 regs, frame, newsp);
657#endif
658 force_sigsegv(sig, current);
659}
660
661/*
662 * Do a signal return; undo the signal stack.
663 */
664int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
665 struct pt_regs *regs)
666{
667 struct sigcontext __user *sc;
668 struct sigcontext sigctx;
669 struct mcontext __user *sr;
670 sigset_t set;
671
672 /* Always make any pending restarted system calls return -EINTR */
673 current_thread_info()->restart_block.fn = do_no_restart_syscall;
674
675 sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
676 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
677 goto badframe;
678
679 set.sig[0] = sigctx.oldmask;
680 set.sig[1] = sigctx._unused[3];
681 restore_sigmask(&set);
682
683 sr = (struct mcontext __user *) sigctx.regs;
684 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
685 || restore_user_regs(regs, sr, 1))
686 goto badframe;
687
688 sigreturn_exit(regs); /* doesn't return */
689 return 0;
690
691badframe:
692 force_sig(SIGSEGV, current);
693 return 0;
694}
695
696/*
697 * Note that 'init' is a special process: it doesn't get signals it doesn't
698 * want to handle. Thus you cannot kill init even with a SIGKILL even by
699 * mistake.
700 */
701int do_signal(sigset_t *oldset, struct pt_regs *regs)
702{
703 siginfo_t info;
704 struct k_sigaction ka;
705 unsigned long frame, newsp;
706 int signr, ret;
707
708 if (try_to_freeze()) {
709 signr = 0;
710 if (!signal_pending(current))
711 goto no_signal;
712 }
713
714 if (!oldset)
715 oldset = &current->blocked;
716
717 newsp = frame = 0;
718
719 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
720 no_signal:
721 if (TRAP(regs) == 0x0C00 /* System Call! */
722 && regs->ccr & 0x10000000 /* error signalled */
723 && ((ret = regs->gpr[3]) == ERESTARTSYS
724 || ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
725 || ret == ERESTART_RESTARTBLOCK)) {
726
727 if (signr > 0
728 && (ret == ERESTARTNOHAND || ret == ERESTART_RESTARTBLOCK
729 || (ret == ERESTARTSYS
730 && !(ka.sa.sa_flags & SA_RESTART)))) {
731 /* make the system call return an EINTR error */
732 regs->result = -EINTR;
733 regs->gpr[3] = EINTR;
734 /* note that the cr0.SO bit is already set */
735 } else {
736 regs->nip -= 4; /* Back up & retry system call */
737 regs->result = 0;
738 regs->trap = 0;
739 if (ret == ERESTART_RESTARTBLOCK)
740 regs->gpr[0] = __NR_restart_syscall;
741 else
742 regs->gpr[3] = regs->orig_gpr3;
743 }
744 }
745
746 if (signr == 0)
747 return 0; /* no signals delivered */
748
749 if ((ka.sa.sa_flags & SA_ONSTACK) && current->sas_ss_size
750 && !on_sig_stack(regs->gpr[1]))
751 newsp = current->sas_ss_sp + current->sas_ss_size;
752 else
753 newsp = regs->gpr[1];
754 newsp &= ~0xfUL;
755
756 /* Whee! Actually deliver the signal. */
757 if (ka.sa.sa_flags & SA_SIGINFO)
758 handle_rt_signal(signr, &ka, &info, oldset, regs, newsp);
759 else
760 handle_signal(signr, &ka, &info, oldset, regs, newsp);
761
762 spin_lock_irq(&current->sighand->siglock);
763 sigorsets(&current->blocked,&current->blocked,&ka.sa.sa_mask);
764 if (!(ka.sa.sa_flags & SA_NODEFER))
765 sigaddset(&current->blocked, signr);
766 recalc_sigpending();
767 spin_unlock_irq(&current->sighand->siglock);
768
769 return 1;
770}
771
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 726fe7ce1747..bc5bf1124836 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -34,11 +34,11 @@
34#include <asm/thread_info.h> 34#include <asm/thread_info.h>
35#include <asm/tlbflush.h> 35#include <asm/tlbflush.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/machdep.h>
37 38
38volatile int smp_commenced; 39volatile int smp_commenced;
39int smp_tb_synchronized; 40int smp_tb_synchronized;
40struct cpuinfo_PPC cpu_data[NR_CPUS]; 41struct cpuinfo_PPC cpu_data[NR_CPUS];
41struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
42atomic_t ipi_recv; 42atomic_t ipi_recv;
43atomic_t ipi_sent; 43atomic_t ipi_sent;
44cpumask_t cpu_online_map; 44cpumask_t cpu_online_map;
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(cpu_online_map);
51EXPORT_SYMBOL(cpu_possible_map); 51EXPORT_SYMBOL(cpu_possible_map);
52 52
53/* SMP operations for this machine */ 53/* SMP operations for this machine */
54static struct smp_ops_t *smp_ops; 54struct smp_ops_t *smp_ops;
55 55
56/* all cpu mappings are 1-1 -- Cort */ 56/* all cpu mappings are 1-1 -- Cort */
57volatile unsigned long cpu_callin_map[NR_CPUS]; 57volatile unsigned long cpu_callin_map[NR_CPUS];
@@ -74,11 +74,11 @@ extern void __save_cpu_setup(void);
74#define PPC_MSG_XMON_BREAK 3 74#define PPC_MSG_XMON_BREAK 3
75 75
76static inline void 76static inline void
77smp_message_pass(int target, int msg, unsigned long data, int wait) 77smp_message_pass(int target, int msg)
78{ 78{
79 if (smp_ops){ 79 if (smp_ops) {
80 atomic_inc(&ipi_sent); 80 atomic_inc(&ipi_sent);
81 smp_ops->message_pass(target,msg,data,wait); 81 smp_ops->message_pass(target, msg);
82 } 82 }
83} 83}
84 84
@@ -119,7 +119,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
119void smp_send_tlb_invalidate(int cpu) 119void smp_send_tlb_invalidate(int cpu)
120{ 120{
121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 ) 121 if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB, 0, 0); 122 smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
123} 123}
124 124
125void smp_send_reschedule(int cpu) 125void smp_send_reschedule(int cpu)
@@ -135,13 +135,13 @@ void smp_send_reschedule(int cpu)
135 */ 135 */
136 /* This is only used if `cpu' is running an idle task, 136 /* This is only used if `cpu' is running an idle task,
137 so it will reschedule itself anyway... */ 137 so it will reschedule itself anyway... */
138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0); 138 smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
139} 139}
140 140
141#ifdef CONFIG_XMON 141#ifdef CONFIG_XMON
142void smp_send_xmon_break(int cpu) 142void smp_send_xmon_break(int cpu)
143{ 143{
144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0); 144 smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
145} 145}
146#endif /* CONFIG_XMON */ 146#endif /* CONFIG_XMON */
147 147
@@ -224,7 +224,7 @@ static int __smp_call_function(void (*func) (void *info), void *info,
224 spin_lock(&call_lock); 224 spin_lock(&call_lock);
225 call_data = &data; 225 call_data = &data;
226 /* Send a message to all other CPUs and wait for them to respond */ 226 /* Send a message to all other CPUs and wait for them to respond */
227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION, 0, 0); 227 smp_message_pass(target, PPC_MSG_CALL_FUNCTION);
228 228
229 /* Wait for response */ 229 /* Wait for response */
230 timeout = 1000000; 230 timeout = 1000000;
@@ -294,7 +294,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
294 smp_store_cpu_info(smp_processor_id()); 294 smp_store_cpu_info(smp_processor_id());
295 cpu_callin_map[smp_processor_id()] = 1; 295 cpu_callin_map[smp_processor_id()] = 1;
296 296
297 smp_ops = ppc_md.smp_ops;
298 if (smp_ops == NULL) { 297 if (smp_ops == NULL) {
299 printk("SMP not supported on this machine.\n"); 298 printk("SMP not supported on this machine.\n");
300 return; 299 return;
@@ -308,9 +307,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
308 /* Backup CPU 0 state */ 307 /* Backup CPU 0 state */
309 __save_cpu_setup(); 308 __save_cpu_setup();
310 309
311 if (smp_ops->space_timers)
312 smp_ops->space_timers(num_cpus);
313
314 for_each_cpu(cpu) { 310 for_each_cpu(cpu) {
315 if (cpu == smp_processor_id()) 311 if (cpu == smp_processor_id())
316 continue; 312 continue;
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
deleted file mode 100644
index 127f040de9de..000000000000
--- a/arch/ppc/kernel/syscalls.c
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * arch/ppc/kernel/sys_ppc.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/i386/kernel/sys_i386.c"
8 * Adapted from the i386 version by Gary Thomas
9 * Modified by Cort Dougan (cort@cs.nmt.edu)
10 * and Paul Mackerras (paulus@cs.anu.edu.au).
11 *
12 * This file contains various random system calls that
13 * have a non-standard calling sequence on the Linux/PPC
14 * platform.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/mm.h>
26#include <linux/smp.h>
27#include <linux/smp_lock.h>
28#include <linux/sem.h>
29#include <linux/msg.h>
30#include <linux/shm.h>
31#include <linux/stat.h>
32#include <linux/syscalls.h>
33#include <linux/mman.h>
34#include <linux/sys.h>
35#include <linux/ipc.h>
36#include <linux/utsname.h>
37#include <linux/file.h>
38#include <linux/unistd.h>
39
40#include <asm/uaccess.h>
41#include <asm/ipc.h>
42#include <asm/semaphore.h>
43
44
45/*
46 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
47 *
48 * This is really horribly ugly.
49 */
50int
51sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
52{
53 int version, ret;
54
55 version = call >> 16; /* hack for backward compatibility */
56 call &= 0xffff;
57
58 ret = -ENOSYS;
59 switch (call) {
60 case SEMOP:
61 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
62 second, NULL);
63 break;
64 case SEMTIMEDOP:
65 ret = sys_semtimedop (first, (struct sembuf __user *)ptr,
66 second, (const struct timespec __user *) fifth);
67 break;
68 case SEMGET:
69 ret = sys_semget (first, second, third);
70 break;
71 case SEMCTL: {
72 union semun fourth;
73
74 if (!ptr)
75 break;
76 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
77 || (ret = get_user(fourth.__pad, (void __user *__user *)ptr)))
78 break;
79 ret = sys_semctl (first, second, third, fourth);
80 break;
81 }
82 case MSGSND:
83 ret = sys_msgsnd (first, (struct msgbuf __user *) ptr, second, third);
84 break;
85 case MSGRCV:
86 switch (version) {
87 case 0: {
88 struct ipc_kludge tmp;
89
90 if (!ptr)
91 break;
92 if ((ret = access_ok(VERIFY_READ, ptr, sizeof(tmp)) ? 0 : -EFAULT)
93 || (ret = copy_from_user(&tmp,
94 (struct ipc_kludge __user *) ptr,
95 sizeof (tmp)) ? -EFAULT : 0))
96 break;
97 ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp,
98 third);
99 break;
100 }
101 default:
102 ret = sys_msgrcv (first, (struct msgbuf __user *) ptr,
103 second, fifth, third);
104 break;
105 }
106 break;
107 case MSGGET:
108 ret = sys_msgget ((key_t) first, second);
109 break;
110 case MSGCTL:
111 ret = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
112 break;
113 case SHMAT: {
114 ulong raddr;
115
116 if ((ret = access_ok(VERIFY_WRITE, (ulong __user *) third,
117 sizeof(ulong)) ? 0 : -EFAULT))
118 break;
119 ret = do_shmat (first, (char __user *) ptr, second, &raddr);
120 if (ret)
121 break;
122 ret = put_user (raddr, (ulong __user *) third);
123 break;
124 }
125 case SHMDT:
126 ret = sys_shmdt ((char __user *)ptr);
127 break;
128 case SHMGET:
129 ret = sys_shmget (first, second, third);
130 break;
131 case SHMCTL:
132 ret = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
133 break;
134 }
135
136 return ret;
137}
138
139/*
140 * sys_pipe() is the normal C calling standard for creating
141 * a pipe. It's not the way unix traditionally does this, though.
142 */
143int sys_pipe(int __user *fildes)
144{
145 int fd[2];
146 int error;
147
148 error = do_pipe(fd);
149 if (!error) {
150 if (copy_to_user(fildes, fd, 2*sizeof(int)))
151 error = -EFAULT;
152 }
153 return error;
154}
155
156static inline unsigned long
157do_mmap2(unsigned long addr, size_t len,
158 unsigned long prot, unsigned long flags,
159 unsigned long fd, unsigned long pgoff)
160{
161 struct file * file = NULL;
162 int ret = -EBADF;
163
164 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
165 if (!(flags & MAP_ANONYMOUS)) {
166 if (!(file = fget(fd)))
167 goto out;
168 }
169
170 down_write(&current->mm->mmap_sem);
171 ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
172 up_write(&current->mm->mmap_sem);
173 if (file)
174 fput(file);
175out:
176 return ret;
177}
178
179unsigned long sys_mmap2(unsigned long addr, size_t len,
180 unsigned long prot, unsigned long flags,
181 unsigned long fd, unsigned long pgoff)
182{
183 return do_mmap2(addr, len, prot, flags, fd, pgoff);
184}
185
186unsigned long sys_mmap(unsigned long addr, size_t len,
187 unsigned long prot, unsigned long flags,
188 unsigned long fd, off_t offset)
189{
190 int err = -EINVAL;
191
192 if (offset & ~PAGE_MASK)
193 goto out;
194
195 err = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
196out:
197 return err;
198}
199
200/*
201 * Due to some executables calling the wrong select we sometimes
202 * get wrong args. This determines how the args are being passed
203 * (a single ptr to them all args passed) then calls
204 * sys_select() with the appropriate args. -- Cort
205 */
206int
207ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp)
208{
209 if ( (unsigned long)n >= 4096 )
210 {
211 unsigned long __user *buffer = (unsigned long __user *)n;
212 if (!access_ok(VERIFY_READ, buffer, 5*sizeof(unsigned long))
213 || __get_user(n, buffer)
214 || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
215 || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
216 || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
217 || __get_user(tvp, ((struct timeval __user * __user *)(buffer+4))))
218 return -EFAULT;
219 }
220 return sys_select(n, inp, outp, exp, tvp);
221}
222
223int sys_uname(struct old_utsname __user * name)
224{
225 int err = -EFAULT;
226
227 down_read(&uts_sem);
228 if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
229 err = 0;
230 up_read(&uts_sem);
231 return err;
232}
233
234int sys_olduname(struct oldold_utsname __user * name)
235{
236 int error;
237
238 if (!name)
239 return -EFAULT;
240 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
241 return -EFAULT;
242
243 down_read(&uts_sem);
244 error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
245 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
246 error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
247 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
248 error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
249 error -= __put_user(0,name->release+__OLD_UTS_LEN);
250 error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
251 error -= __put_user(0,name->version+__OLD_UTS_LEN);
252 error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
253 error = __put_user(0,name->machine+__OLD_UTS_LEN);
254 up_read(&uts_sem);
255
256 error = error ? -EFAULT : 0;
257 return error;
258}
259
260/*
261 * We put the arguments in a different order so we only use 6
262 * registers for arguments, rather than 7 as sys_fadvise64_64 needs
263 * (because `offset' goes in r5/r6).
264 */
265long ppc_fadvise64_64(int fd, int advice, loff_t offset, loff_t len)
266{
267 return sys_fadvise64_64(fd, offset, len, advice);
268}
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 22d7fd1e0aea..53ea723af60a 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -66,11 +66,6 @@
66 66
67#include <asm/time.h> 67#include <asm/time.h>
68 68
69/* XXX false sharing with below? */
70u64 jiffies_64 = INITIAL_JIFFIES;
71
72EXPORT_SYMBOL(jiffies_64);
73
74unsigned long disarm_decr[NR_CPUS]; 69unsigned long disarm_decr[NR_CPUS];
75 70
76extern struct timezone sys_tz; 71extern struct timezone sys_tz;
@@ -121,6 +116,15 @@ unsigned long profile_pc(struct pt_regs *regs)
121EXPORT_SYMBOL(profile_pc); 116EXPORT_SYMBOL(profile_pc);
122#endif 117#endif
123 118
119void wakeup_decrementer(void)
120{
121 set_dec(tb_ticks_per_jiffy);
122 /* No currently-supported powerbook has a 601,
123 * so use get_tbl, not native
124 */
125 last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
126}
127
124/* 128/*
125 * timer_interrupt - gets called when the decrementer overflows, 129 * timer_interrupt - gets called when the decrementer overflows,
126 * with interrupts disabled. 130 * with interrupts disabled.
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 961ede87be72..f265b81e7008 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -41,9 +41,14 @@
41#ifdef CONFIG_PMAC_BACKLIGHT 41#ifdef CONFIG_PMAC_BACKLIGHT
42#include <asm/backlight.h> 42#include <asm/backlight.h>
43#endif 43#endif
44#include <asm/perfmon.h> 44#include <asm/pmc.h>
45 45
46#ifdef CONFIG_XMON 46#ifdef CONFIG_XMON
47extern int xmon_bpt(struct pt_regs *regs);
48extern int xmon_sstep(struct pt_regs *regs);
49extern int xmon_iabr_match(struct pt_regs *regs);
50extern int xmon_dabr_match(struct pt_regs *regs);
51
47void (*debugger)(struct pt_regs *regs) = xmon; 52void (*debugger)(struct pt_regs *regs) = xmon;
48int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt; 53int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
49int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep; 54int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
@@ -74,7 +79,7 @@ void (*debugger_fault_handler)(struct pt_regs *regs);
74 79
75DEFINE_SPINLOCK(die_lock); 80DEFINE_SPINLOCK(die_lock);
76 81
77void die(const char * str, struct pt_regs * fp, long err) 82int die(const char * str, struct pt_regs * fp, long err)
78{ 83{
79 static int die_counter; 84 static int die_counter;
80 int nl = 0; 85 int nl = 0;
@@ -232,7 +237,7 @@ platform_machine_check(struct pt_regs *regs)
232{ 237{
233} 238}
234 239
235void MachineCheckException(struct pt_regs *regs) 240void machine_check_exception(struct pt_regs *regs)
236{ 241{
237 unsigned long reason = get_mc_reason(regs); 242 unsigned long reason = get_mc_reason(regs);
238 243
@@ -393,14 +398,14 @@ void SMIException(struct pt_regs *regs)
393#endif 398#endif
394} 399}
395 400
396void UnknownException(struct pt_regs *regs) 401void unknown_exception(struct pt_regs *regs)
397{ 402{
398 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n", 403 printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
399 regs->nip, regs->msr, regs->trap, print_tainted()); 404 regs->nip, regs->msr, regs->trap, print_tainted());
400 _exception(SIGTRAP, regs, 0, 0); 405 _exception(SIGTRAP, regs, 0, 0);
401} 406}
402 407
403void InstructionBreakpoint(struct pt_regs *regs) 408void instruction_breakpoint_exception(struct pt_regs *regs)
404{ 409{
405 if (debugger_iabr_match(regs)) 410 if (debugger_iabr_match(regs))
406 return; 411 return;
@@ -575,7 +580,7 @@ extern struct bug_entry __start___bug_table[], __stop___bug_table[];
575#define module_find_bug(x) NULL 580#define module_find_bug(x) NULL
576#endif 581#endif
577 582
578static struct bug_entry *find_bug(unsigned long bugaddr) 583struct bug_entry *find_bug(unsigned long bugaddr)
579{ 584{
580 struct bug_entry *bug; 585 struct bug_entry *bug;
581 586
@@ -622,7 +627,7 @@ int check_bug_trap(struct pt_regs *regs)
622 return 0; 627 return 0;
623} 628}
624 629
625void ProgramCheckException(struct pt_regs *regs) 630void program_check_exception(struct pt_regs *regs)
626{ 631{
627 unsigned int reason = get_reason(regs); 632 unsigned int reason = get_reason(regs);
628 extern int do_mathemu(struct pt_regs *regs); 633 extern int do_mathemu(struct pt_regs *regs);
@@ -654,7 +659,7 @@ void ProgramCheckException(struct pt_regs *regs)
654 giveup_fpu(current); 659 giveup_fpu(current);
655 preempt_enable(); 660 preempt_enable();
656 661
657 fpscr = current->thread.fpscr; 662 fpscr = current->thread.fpscr.val;
658 fpscr &= fpscr << 22; /* mask summary bits with enables */ 663 fpscr &= fpscr << 22; /* mask summary bits with enables */
659 if (fpscr & FPSCR_VX) 664 if (fpscr & FPSCR_VX)
660 code = FPE_FLTINV; 665 code = FPE_FLTINV;
@@ -701,7 +706,7 @@ void ProgramCheckException(struct pt_regs *regs)
701 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 706 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
702} 707}
703 708
704void SingleStepException(struct pt_regs *regs) 709void single_step_exception(struct pt_regs *regs)
705{ 710{
706 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */ 711 regs->msr &= ~(MSR_SE | MSR_BE); /* Turn off 'trace' bits */
707 if (debugger_sstep(regs)) 712 if (debugger_sstep(regs))
@@ -709,7 +714,7 @@ void SingleStepException(struct pt_regs *regs)
709 _exception(SIGTRAP, regs, TRAP_TRACE, 0); 714 _exception(SIGTRAP, regs, TRAP_TRACE, 0);
710} 715}
711 716
712void AlignmentException(struct pt_regs *regs) 717void alignment_exception(struct pt_regs *regs)
713{ 718{
714 int fixed; 719 int fixed;
715 720
@@ -814,7 +819,18 @@ void TAUException(struct pt_regs *regs)
814} 819}
815#endif /* CONFIG_INT_TAU */ 820#endif /* CONFIG_INT_TAU */
816 821
817void AltivecUnavailException(struct pt_regs *regs) 822/*
823 * FP unavailable trap from kernel - print a message, but let
824 * the task use FP in the kernel until it returns to user mode.
825 */
826void kernel_fp_unavailable_exception(struct pt_regs *regs)
827{
828 regs->msr |= MSR_FP;
829 printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
830 current, regs->nip);
831}
832
833void altivec_unavailable_exception(struct pt_regs *regs)
818{ 834{
819 static int kernel_altivec_count; 835 static int kernel_altivec_count;
820 836
@@ -835,7 +851,7 @@ void AltivecUnavailException(struct pt_regs *regs)
835} 851}
836 852
837#ifdef CONFIG_ALTIVEC 853#ifdef CONFIG_ALTIVEC
838void AltivecAssistException(struct pt_regs *regs) 854void altivec_assist_exception(struct pt_regs *regs)
839{ 855{
840 int err; 856 int err;
841 857
@@ -872,7 +888,7 @@ void AltivecAssistException(struct pt_regs *regs)
872#endif /* CONFIG_ALTIVEC */ 888#endif /* CONFIG_ALTIVEC */
873 889
874#ifdef CONFIG_E500 890#ifdef CONFIG_E500
875void PerformanceMonitorException(struct pt_regs *regs) 891void performance_monitor_exception(struct pt_regs *regs)
876{ 892{
877 perf_irq(regs); 893 perf_irq(regs);
878} 894}
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
deleted file mode 100644
index 82a21346bf80..000000000000
--- a/arch/ppc/kernel/vector.S
+++ /dev/null
@@ -1,217 +0,0 @@
1#include <asm/ppc_asm.h>
2#include <asm/processor.h>
3
4/*
5 * The routines below are in assembler so we can closely control the
6 * usage of floating-point registers. These routines must be called
7 * with preempt disabled.
8 */
9 .data
10fpzero:
11 .long 0
12fpone:
13 .long 0x3f800000 /* 1.0 in single-precision FP */
14fphalf:
15 .long 0x3f000000 /* 0.5 in single-precision FP */
16
17 .text
18/*
19 * Internal routine to enable floating point and set FPSCR to 0.
20 * Don't call it from C; it doesn't use the normal calling convention.
21 */
22fpenable:
23 mfmsr r10
24 ori r11,r10,MSR_FP
25 mtmsr r11
26 isync
27 stfd fr0,24(r1)
28 stfd fr1,16(r1)
29 stfd fr31,8(r1)
30 lis r11,fpzero@ha
31 mffs fr31
32 lfs fr1,fpzero@l(r11)
33 mtfsf 0xff,fr1
34 blr
35
36fpdisable:
37 mtfsf 0xff,fr31
38 lfd fr31,8(r1)
39 lfd fr1,16(r1)
40 lfd fr0,24(r1)
41 mtmsr r10
42 isync
43 blr
44
45/*
46 * Vector add, floating point.
47 */
48 .globl vaddfp
49vaddfp:
50 stwu r1,-32(r1)
51 mflr r0
52 stw r0,36(r1)
53 bl fpenable
54 li r0,4
55 mtctr r0
56 li r6,0
571: lfsx fr0,r4,r6
58 lfsx fr1,r5,r6
59 fadds fr0,fr0,fr1
60 stfsx fr0,r3,r6
61 addi r6,r6,4
62 bdnz 1b
63 bl fpdisable
64 lwz r0,36(r1)
65 mtlr r0
66 addi r1,r1,32
67 blr
68
69/*
70 * Vector subtract, floating point.
71 */
72 .globl vsubfp
73vsubfp:
74 stwu r1,-32(r1)
75 mflr r0
76 stw r0,36(r1)
77 bl fpenable
78 li r0,4
79 mtctr r0
80 li r6,0
811: lfsx fr0,r4,r6
82 lfsx fr1,r5,r6
83 fsubs fr0,fr0,fr1
84 stfsx fr0,r3,r6
85 addi r6,r6,4
86 bdnz 1b
87 bl fpdisable
88 lwz r0,36(r1)
89 mtlr r0
90 addi r1,r1,32
91 blr
92
93/*
94 * Vector multiply and add, floating point.
95 */
96 .globl vmaddfp
97vmaddfp:
98 stwu r1,-48(r1)
99 mflr r0
100 stw r0,52(r1)
101 bl fpenable
102 stfd fr2,32(r1)
103 li r0,4
104 mtctr r0
105 li r7,0
1061: lfsx fr0,r4,r7
107 lfsx fr1,r5,r7
108 lfsx fr2,r6,r7
109 fmadds fr0,fr0,fr2,fr1
110 stfsx fr0,r3,r7
111 addi r7,r7,4
112 bdnz 1b
113 lfd fr2,32(r1)
114 bl fpdisable
115 lwz r0,52(r1)
116 mtlr r0
117 addi r1,r1,48
118 blr
119
120/*
121 * Vector negative multiply and subtract, floating point.
122 */
123 .globl vnmsubfp
124vnmsubfp:
125 stwu r1,-48(r1)
126 mflr r0
127 stw r0,52(r1)
128 bl fpenable
129 stfd fr2,32(r1)
130 li r0,4
131 mtctr r0
132 li r7,0
1331: lfsx fr0,r4,r7
134 lfsx fr1,r5,r7
135 lfsx fr2,r6,r7
136 fnmsubs fr0,fr0,fr2,fr1
137 stfsx fr0,r3,r7
138 addi r7,r7,4
139 bdnz 1b
140 lfd fr2,32(r1)
141 bl fpdisable
142 lwz r0,52(r1)
143 mtlr r0
144 addi r1,r1,48
145 blr
146
147/*
148 * Vector reciprocal estimate. We just compute 1.0/x.
149 * r3 -> destination, r4 -> source.
150 */
151 .globl vrefp
152vrefp:
153 stwu r1,-32(r1)
154 mflr r0
155 stw r0,36(r1)
156 bl fpenable
157 lis r9,fpone@ha
158 li r0,4
159 lfs fr1,fpone@l(r9)
160 mtctr r0
161 li r6,0
1621: lfsx fr0,r4,r6
163 fdivs fr0,fr1,fr0
164 stfsx fr0,r3,r6
165 addi r6,r6,4
166 bdnz 1b
167 bl fpdisable
168 lwz r0,36(r1)
169 mtlr r0
170 addi r1,r1,32
171 blr
172
173/*
174 * Vector reciprocal square-root estimate, floating point.
175 * We use the frsqrte instruction for the initial estimate followed
176 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
177 * r3 -> destination, r4 -> source.
178 */
179 .globl vrsqrtefp
180vrsqrtefp:
181 stwu r1,-48(r1)
182 mflr r0
183 stw r0,52(r1)
184 bl fpenable
185 stfd fr2,32(r1)
186 stfd fr3,40(r1)
187 stfd fr4,48(r1)
188 stfd fr5,56(r1)
189 lis r9,fpone@ha
190 lis r8,fphalf@ha
191 li r0,4
192 lfs fr4,fpone@l(r9)
193 lfs fr5,fphalf@l(r8)
194 mtctr r0
195 li r6,0
1961: lfsx fr0,r4,r6
197 frsqrte fr1,fr0 /* r = frsqrte(s) */
198 fmuls fr3,fr1,fr0 /* r * s */
199 fmuls fr2,fr1,fr5 /* r * 0.5 */
200 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
201 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
202 fmuls fr3,fr1,fr0 /* r * s */
203 fmuls fr2,fr1,fr5 /* r * 0.5 */
204 fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
205 fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
206 stfsx fr1,r3,r6
207 addi r6,r6,4
208 bdnz 1b
209 lfd fr5,56(r1)
210 lfd fr4,48(r1)
211 lfd fr3,40(r1)
212 lfd fr2,32(r1)
213 bl fpdisable
214 lwz r0,36(r1)
215 mtlr r0
216 addi r1,r1,32
217 blr
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 17d2db7e537d..09c6525cfa61 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -149,32 +149,6 @@ SECTIONS
149 149
150 . = ALIGN(4096); 150 . = ALIGN(4096);
151 _sextratext = .; 151 _sextratext = .;
152 __pmac_begin = .;
153 .pmac.text : { *(.pmac.text) }
154 .pmac.data : { *(.pmac.data) }
155 . = ALIGN(4096);
156 __pmac_end = .;
157
158 . = ALIGN(4096);
159 __prep_begin = .;
160 .prep.text : { *(.prep.text) }
161 .prep.data : { *(.prep.data) }
162 . = ALIGN(4096);
163 __prep_end = .;
164
165 . = ALIGN(4096);
166 __chrp_begin = .;
167 .chrp.text : { *(.chrp.text) }
168 .chrp.data : { *(.chrp.data) }
169 . = ALIGN(4096);
170 __chrp_end = .;
171
172 . = ALIGN(4096);
173 __openfirmware_begin = .;
174 .openfirmware.text : { *(.openfirmware.text) }
175 .openfirmware.data : { *(.openfirmware.data) }
176 . = ALIGN(4096);
177 __openfirmware_end = .;
178 _eextratext = .; 152 _eextratext = .;
179 153
180 __bss_start = .; 154 __bss_start = .;
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 36c9b97fd92a..2e258c49e8be 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -65,9 +65,9 @@
65 .stabs "arch/ppc/lib/",N_SO,0,0,0f 65 .stabs "arch/ppc/lib/",N_SO,0,0,0f
66 .stabs "string.S",N_SO,0,0,0f 66 .stabs "string.S",N_SO,0,0,0f
67 67
68CACHELINE_BYTES = L1_CACHE_LINE_SIZE 68CACHELINE_BYTES = L1_CACHE_BYTES
69LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE 69LG_CACHELINE_BYTES = L1_CACHE_SHIFT
70CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) 70CACHELINE_MASK = (L1_CACHE_BYTES-1)
71 71
72_GLOBAL(strcpy) 72_GLOBAL(strcpy)
73 addi r5,r3,-1 73 addi r5,r3,-1
@@ -265,12 +265,12 @@ _GLOBAL(cacheable_memcpy)
265 dcbz r11,r6 265 dcbz r11,r6
266#endif 266#endif
267 COPY_16_BYTES 267 COPY_16_BYTES
268#if L1_CACHE_LINE_SIZE >= 32 268#if L1_CACHE_BYTES >= 32
269 COPY_16_BYTES 269 COPY_16_BYTES
270#if L1_CACHE_LINE_SIZE >= 64 270#if L1_CACHE_BYTES >= 64
271 COPY_16_BYTES 271 COPY_16_BYTES
272 COPY_16_BYTES 272 COPY_16_BYTES
273#if L1_CACHE_LINE_SIZE >= 128 273#if L1_CACHE_BYTES >= 128
274 COPY_16_BYTES 274 COPY_16_BYTES
275 COPY_16_BYTES 275 COPY_16_BYTES
276 COPY_16_BYTES 276 COPY_16_BYTES
@@ -485,12 +485,12 @@ _GLOBAL(__copy_tofrom_user)
485 .text 485 .text
486/* the main body of the cacheline loop */ 486/* the main body of the cacheline loop */
487 COPY_16_BYTES_WITHEX(0) 487 COPY_16_BYTES_WITHEX(0)
488#if L1_CACHE_LINE_SIZE >= 32 488#if L1_CACHE_BYTES >= 32
489 COPY_16_BYTES_WITHEX(1) 489 COPY_16_BYTES_WITHEX(1)
490#if L1_CACHE_LINE_SIZE >= 64 490#if L1_CACHE_BYTES >= 64
491 COPY_16_BYTES_WITHEX(2) 491 COPY_16_BYTES_WITHEX(2)
492 COPY_16_BYTES_WITHEX(3) 492 COPY_16_BYTES_WITHEX(3)
493#if L1_CACHE_LINE_SIZE >= 128 493#if L1_CACHE_BYTES >= 128
494 COPY_16_BYTES_WITHEX(4) 494 COPY_16_BYTES_WITHEX(4)
495 COPY_16_BYTES_WITHEX(5) 495 COPY_16_BYTES_WITHEX(5)
496 COPY_16_BYTES_WITHEX(6) 496 COPY_16_BYTES_WITHEX(6)
@@ -544,12 +544,12 @@ _GLOBAL(__copy_tofrom_user)
544 * 104f (if in read part) or 105f (if in write part), after updating r5 544 * 104f (if in read part) or 105f (if in write part), after updating r5
545 */ 545 */
546 COPY_16_BYTES_EXCODE(0) 546 COPY_16_BYTES_EXCODE(0)
547#if L1_CACHE_LINE_SIZE >= 32 547#if L1_CACHE_BYTES >= 32
548 COPY_16_BYTES_EXCODE(1) 548 COPY_16_BYTES_EXCODE(1)
549#if L1_CACHE_LINE_SIZE >= 64 549#if L1_CACHE_BYTES >= 64
550 COPY_16_BYTES_EXCODE(2) 550 COPY_16_BYTES_EXCODE(2)
551 COPY_16_BYTES_EXCODE(3) 551 COPY_16_BYTES_EXCODE(3)
552#if L1_CACHE_LINE_SIZE >= 128 552#if L1_CACHE_BYTES >= 128
553 COPY_16_BYTES_EXCODE(4) 553 COPY_16_BYTES_EXCODE(4)
554 COPY_16_BYTES_EXCODE(5) 554 COPY_16_BYTES_EXCODE(5)
555 COPY_16_BYTES_EXCODE(6) 555 COPY_16_BYTES_EXCODE(6)
diff --git a/arch/ppc/math-emu/sfp-machine.h b/arch/ppc/math-emu/sfp-machine.h
index 686e06d29186..4b17d83cfcdd 100644
--- a/arch/ppc/math-emu/sfp-machine.h
+++ b/arch/ppc/math-emu/sfp-machine.h
@@ -166,7 +166,7 @@ extern int fp_pack_ds(void *, long, unsigned long, unsigned long, long, long);
166#include <linux/kernel.h> 166#include <linux/kernel.h>
167#include <linux/sched.h> 167#include <linux/sched.h>
168 168
169#define __FPU_FPSCR (current->thread.fpscr) 169#define __FPU_FPSCR (current->thread.fpscr.val)
170 170
171/* We only actually write to the destination register 171/* We only actually write to the destination register
172 * if exceptions signalled (if any) will not trap. 172 * if exceptions signalled (if any) will not trap.
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index f421a4b337f6..99b48abd3296 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -69,15 +69,12 @@ int init_bootmem_done;
69int boot_mapsize; 69int boot_mapsize;
70#ifdef CONFIG_PPC_PMAC 70#ifdef CONFIG_PPC_PMAC
71unsigned long agp_special_page; 71unsigned long agp_special_page;
72EXPORT_SYMBOL(agp_special_page);
72#endif 73#endif
73 74
74extern char _end[]; 75extern char _end[];
75extern char etext[], _stext[]; 76extern char etext[], _stext[];
76extern char __init_begin, __init_end; 77extern char __init_begin, __init_end;
77extern char __prep_begin, __prep_end;
78extern char __chrp_begin, __chrp_end;
79extern char __pmac_begin, __pmac_end;
80extern char __openfirmware_begin, __openfirmware_end;
81 78
82#ifdef CONFIG_HIGHMEM 79#ifdef CONFIG_HIGHMEM
83pte_t *kmap_pte; 80pte_t *kmap_pte;
@@ -167,14 +164,6 @@ void free_initmem(void)
167 164
168 printk ("Freeing unused kernel memory:"); 165 printk ("Freeing unused kernel memory:");
169 FREESEC(init); 166 FREESEC(init);
170 if (_machine != _MACH_Pmac)
171 FREESEC(pmac);
172 if (_machine != _MACH_chrp)
173 FREESEC(chrp);
174 if (_machine != _MACH_prep)
175 FREESEC(prep);
176 if (!have_of)
177 FREESEC(openfirmware);
178 printk("\n"); 167 printk("\n");
179 ppc_md.progress = NULL; 168 ppc_md.progress = NULL;
180#undef FREESEC 169#undef FREESEC
@@ -648,18 +637,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
648 */ 637 */
649int page_is_ram(unsigned long pfn) 638int page_is_ram(unsigned long pfn)
650{ 639{
651 unsigned long paddr = (pfn << PAGE_SHIFT); 640 return pfn < max_pfn;
652
653 return paddr < __pa(high_memory);
654} 641}
655 642
656pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr, 643pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
657 unsigned long size, pgprot_t vma_prot) 644 unsigned long size, pgprot_t vma_prot)
658{ 645{
659 if (ppc_md.phys_mem_access_prot) 646 if (ppc_md.phys_mem_access_prot)
660 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot); 647 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
661 648
662 if (!page_is_ram(addr >> PAGE_SHIFT)) 649 if (!page_is_ram(pfn))
663 vma_prot = __pgprot(pgprot_val(vma_prot) 650 vma_prot = __pgprot(pgprot_val(vma_prot)
664 | _PAGE_GUARDED | _PAGE_NO_CACHE); 651 | _PAGE_GUARDED | _PAGE_NO_CACHE);
665 return vma_prot; 652 return vma_prot;
diff --git a/arch/ppc/oprofile/common.c b/arch/ppc/oprofile/common.c
deleted file mode 100644
index 3169c67abea7..000000000000
--- a/arch/ppc/oprofile/common.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * PPC 32 oprofile support
3 * Based on PPC64 oprofile support
4 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
5 *
6 * Copyright (C) Freescale Semiconductor, Inc 2004
7 *
8 * Author: Andy Fleming
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/oprofile.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/smp.h>
20#include <linux/errno.h>
21#include <asm/ptrace.h>
22#include <asm/system.h>
23#include <asm/perfmon.h>
24#include <asm/cputable.h>
25
26#include "op_impl.h"
27
28static struct op_ppc32_model *model;
29
30static struct op_counter_config ctr[OP_MAX_COUNTER];
31static struct op_system_config sys;
32
33static void op_handle_interrupt(struct pt_regs *regs)
34{
35 model->handle_interrupt(regs, ctr);
36}
37
38static int op_ppc32_setup(void)
39{
40 /* Install our interrupt handler into the existing hook. */
41 if(request_perfmon_irq(&op_handle_interrupt))
42 return -EBUSY;
43
44 mb();
45
46 /* Pre-compute the values to stuff in the hardware registers. */
47 model->reg_setup(ctr, &sys, model->num_counters);
48
49#if 0
50 /* FIXME: Make multi-cpu work */
51 /* Configure the registers on all cpus. */
52 on_each_cpu(model->reg_setup, NULL, 0, 1);
53#endif
54
55 return 0;
56}
57
58static void op_ppc32_shutdown(void)
59{
60 mb();
61
62 /* Remove our interrupt handler. We may be removing this module. */
63 free_perfmon_irq();
64}
65
66static void op_ppc32_cpu_start(void *dummy)
67{
68 model->start(ctr);
69}
70
71static int op_ppc32_start(void)
72{
73 on_each_cpu(op_ppc32_cpu_start, NULL, 0, 1);
74 return 0;
75}
76
77static inline void op_ppc32_cpu_stop(void *dummy)
78{
79 model->stop();
80}
81
82static void op_ppc32_stop(void)
83{
84 on_each_cpu(op_ppc32_cpu_stop, NULL, 0, 1);
85}
86
87static int op_ppc32_create_files(struct super_block *sb, struct dentry *root)
88{
89 int i;
90
91 for (i = 0; i < model->num_counters; ++i) {
92 struct dentry *dir;
93 char buf[3];
94
95 snprintf(buf, sizeof buf, "%d", i);
96 dir = oprofilefs_mkdir(sb, root, buf);
97
98 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
99 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
100 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
101 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
102 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
103
104 /* FIXME: Not sure if this is used */
105 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
106 }
107
108 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
109 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
110
111 /* Default to tracing both kernel and user */
112 sys.enable_kernel = 1;
113 sys.enable_user = 1;
114
115 return 0;
116}
117
118static struct oprofile_operations oprof_ppc32_ops = {
119 .create_files = op_ppc32_create_files,
120 .setup = op_ppc32_setup,
121 .shutdown = op_ppc32_shutdown,
122 .start = op_ppc32_start,
123 .stop = op_ppc32_stop,
124 .cpu_type = NULL /* To be filled in below. */
125};
126
127int __init oprofile_arch_init(struct oprofile_operations *ops)
128{
129 char *name;
130 int cpu_id = smp_processor_id();
131
132#ifdef CONFIG_FSL_BOOKE
133 model = &op_model_fsl_booke;
134#else
135 return -ENODEV;
136#endif
137
138 name = kmalloc(32, GFP_KERNEL);
139
140 if (NULL == name)
141 return -ENOMEM;
142
143 sprintf(name, "ppc/%s", cur_cpu_spec[cpu_id]->cpu_name);
144
145 oprof_ppc32_ops.cpu_type = name;
146
147 model->num_counters = cur_cpu_spec[cpu_id]->num_pmcs;
148
149 *ops = oprof_ppc32_ops;
150
151 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
152 oprof_ppc32_ops.cpu_type);
153
154 return 0;
155}
156
157void oprofile_arch_exit(void)
158{
159 kfree(oprof_ppc32_ops.cpu_type);
160 oprof_ppc32_ops.cpu_type = NULL;
161}
diff --git a/arch/ppc/oprofile/op_impl.h b/arch/ppc/oprofile/op_impl.h
deleted file mode 100644
index bc336dc971e3..000000000000
--- a/arch/ppc/oprofile/op_impl.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
3 *
4 * Based on alpha version.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef OP_IMPL_H
13#define OP_IMPL_H 1
14
15#define OP_MAX_COUNTER 8
16
17/* Per-counter configuration as set via oprofilefs. */
18struct op_counter_config {
19 unsigned long enabled;
20 unsigned long event;
21 unsigned long count;
22 unsigned long kernel;
23 unsigned long user;
24 unsigned long unit_mask;
25};
26
27/* System-wide configuration as set via oprofilefs. */
28struct op_system_config {
29 unsigned long enable_kernel;
30 unsigned long enable_user;
31};
32
33/* Per-arch configuration */
34struct op_ppc32_model {
35 void (*reg_setup) (struct op_counter_config *,
36 struct op_system_config *,
37 int num_counters);
38 void (*start) (struct op_counter_config *);
39 void (*stop) (void);
40 void (*handle_interrupt) (struct pt_regs *,
41 struct op_counter_config *);
42 int num_counters;
43};
44
45#endif /* OP_IMPL_H */
diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c
index 78a403b48dba..159b228eca1e 100644
--- a/arch/ppc/platforms/4xx/bamboo.c
+++ b/arch/ppc/platforms/4xx/bamboo.c
@@ -51,7 +51,7 @@
51#include <syslib/gen550.h> 51#include <syslib/gen550.h>
52#include <syslib/ibm440gx_common.h> 52#include <syslib/ibm440gx_common.h>
53 53
54bd_t __res; 54extern bd_t __res;
55 55
56static struct ibm44x_clocks clocks __initdata; 56static struct ibm44x_clocks clocks __initdata;
57 57
@@ -425,17 +425,7 @@ bamboo_setup_arch(void)
425void __init platform_init(unsigned long r3, unsigned long r4, 425void __init platform_init(unsigned long r3, unsigned long r4,
426 unsigned long r5, unsigned long r6, unsigned long r7) 426 unsigned long r5, unsigned long r6, unsigned long r7)
427{ 427{
428 parse_bootinfo(find_bootinfo()); 428 ibm44x_platform_init(r3, r4, r5, r6, r7);
429
430 /*
431 * If we were passed in a board information, copy it into the
432 * residual data area.
433 */
434 if (r3)
435 __res = *(bd_t *)(r3 + KERNELBASE);
436
437
438 ibm44x_platform_init();
439 429
440 ppc_md.setup_arch = bamboo_setup_arch; 430 ppc_md.setup_arch = bamboo_setup_arch;
441 ppc_md.show_cpuinfo = bamboo_show_cpuinfo; 431 ppc_md.show_cpuinfo = bamboo_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c
index 27b778ab903b..64ebae19cdbb 100644
--- a/arch/ppc/platforms/4xx/ebony.c
+++ b/arch/ppc/platforms/4xx/ebony.c
@@ -54,7 +54,7 @@
54#include <syslib/gen550.h> 54#include <syslib/gen550.h>
55#include <syslib/ibm440gp_common.h> 55#include <syslib/ibm440gp_common.h>
56 56
57bd_t __res; 57extern bd_t __res;
58 58
59static struct ibm44x_clocks clocks __initdata; 59static struct ibm44x_clocks clocks __initdata;
60 60
@@ -90,7 +90,7 @@ ebony_calibrate_decr(void)
90 * on Rev. C silicon then errata forces us to 90 * on Rev. C silicon then errata forces us to
91 * use the internal clock. 91 * use the internal clock.
92 */ 92 */
93 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GP Rev. B") == 0) 93 if (strcmp(cur_cpu_spec->cpu_name, "440GP Rev. B") == 0)
94 freq = EBONY_440GP_RB_SYSCLK; 94 freq = EBONY_440GP_RB_SYSCLK;
95 else 95 else
96 freq = EBONY_440GP_RC_SYSCLK; 96 freq = EBONY_440GP_RC_SYSCLK;
@@ -317,16 +317,7 @@ ebony_setup_arch(void)
317void __init platform_init(unsigned long r3, unsigned long r4, 317void __init platform_init(unsigned long r3, unsigned long r4,
318 unsigned long r5, unsigned long r6, unsigned long r7) 318 unsigned long r5, unsigned long r6, unsigned long r7)
319{ 319{
320 parse_bootinfo(find_bootinfo()); 320 ibm44x_platform_init(r3, r4, r5, r6, r7);
321
322 /*
323 * If we were passed in a board information, copy it into the
324 * residual data area.
325 */
326 if (r3)
327 __res = *(bd_t *)(r3 + KERNELBASE);
328
329 ibm44x_platform_init();
330 321
331 ppc_md.setup_arch = ebony_setup_arch; 322 ppc_md.setup_arch = ebony_setup_arch;
332 ppc_md.show_cpuinfo = ebony_show_cpuinfo; 323 ppc_md.show_cpuinfo = ebony_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c
index 16d953bda22c..d810b736d9bf 100644
--- a/arch/ppc/platforms/4xx/luan.c
+++ b/arch/ppc/platforms/4xx/luan.c
@@ -52,7 +52,7 @@
52#include <syslib/ibm440gx_common.h> 52#include <syslib/ibm440gx_common.h>
53#include <syslib/ibm440sp_common.h> 53#include <syslib/ibm440sp_common.h>
54 54
55bd_t __res; 55extern bd_t __res;
56 56
57static struct ibm44x_clocks clocks __initdata; 57static struct ibm44x_clocks clocks __initdata;
58 58
@@ -355,16 +355,7 @@ luan_setup_arch(void)
355void __init platform_init(unsigned long r3, unsigned long r4, 355void __init platform_init(unsigned long r3, unsigned long r4,
356 unsigned long r5, unsigned long r6, unsigned long r7) 356 unsigned long r5, unsigned long r6, unsigned long r7)
357{ 357{
358 parse_bootinfo(find_bootinfo()); 358 ibm44x_platform_init(r3, r4, r5, r6, r7);
359
360 /*
361 * If we were passed in a board information, copy it into the
362 * residual data area.
363 */
364 if (r3)
365 __res = *(bd_t *)(r3 + KERNELBASE);
366
367 ibm44x_platform_init();
368 359
369 ppc_md.setup_arch = luan_setup_arch; 360 ppc_md.setup_arch = luan_setup_arch;
370 ppc_md.show_cpuinfo = luan_show_cpuinfo; 361 ppc_md.show_cpuinfo = luan_show_cpuinfo;
diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c
index 506949c5dd29..73b2c98158f6 100644
--- a/arch/ppc/platforms/4xx/ocotea.c
+++ b/arch/ppc/platforms/4xx/ocotea.c
@@ -52,7 +52,7 @@
52#include <syslib/gen550.h> 52#include <syslib/gen550.h>
53#include <syslib/ibm440gx_common.h> 53#include <syslib/ibm440gx_common.h>
54 54
55bd_t __res; 55extern bd_t __res;
56 56
57static struct ibm44x_clocks clocks __initdata; 57static struct ibm44x_clocks clocks __initdata;
58 58
@@ -286,6 +286,15 @@ ocotea_setup_arch(void)
286 286
287 ibm440gx_tah_enable(); 287 ibm440gx_tah_enable();
288 288
289 /*
290 * Determine various clocks.
291 * To be completely correct we should get SysClk
292 * from FPGA, because it can be changed by on-board switches
293 * --ebs
294 */
295 ibm440gx_get_clocks(&clocks, 33333333, 6 * 1843200);
296 ocp_sys_info.opb_bus_freq = clocks.opb;
297
289 /* Setup TODC access */ 298 /* Setup TODC access */
290 TODC_INIT(TODC_TYPE_DS1743, 299 TODC_INIT(TODC_TYPE_DS1743,
291 0, 300 0,
@@ -324,25 +333,7 @@ static void __init ocotea_init(void)
324void __init platform_init(unsigned long r3, unsigned long r4, 333void __init platform_init(unsigned long r3, unsigned long r4,
325 unsigned long r5, unsigned long r6, unsigned long r7) 334 unsigned long r5, unsigned long r6, unsigned long r7)
326{ 335{
327 parse_bootinfo(find_bootinfo()); 336 ibm44x_platform_init(r3, r4, r5, r6, r7);
328
329 /*
330 * If we were passed in a board information, copy it into the
331 * residual data area.
332 */
333 if (r3)
334 __res = *(bd_t *)(r3 + KERNELBASE);
335
336 /*
337 * Determine various clocks.
338 * To be completely correct we should get SysClk
339 * from FPGA, because it can be changed by on-board switches
340 * --ebs
341 */
342 ibm440gx_get_clocks(&clocks, 33333333, 6 * 1843200);
343 ocp_sys_info.opb_bus_freq = clocks.opb;
344
345 ibm44x_platform_init();
346 337
347 ppc_md.setup_arch = ocotea_setup_arch; 338 ppc_md.setup_arch = ocotea_setup_arch;
348 ppc_md.show_cpuinfo = ocotea_show_cpuinfo; 339 ppc_md.show_cpuinfo = ocotea_show_cpuinfo;
diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.h b/arch/ppc/platforms/83xx/mpc834x_sys.h
index 1584cd77a9ef..58e44c042535 100644
--- a/arch/ppc/platforms/83xx/mpc834x_sys.h
+++ b/arch/ppc/platforms/83xx/mpc834x_sys.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <syslib/ppc83xx_setup.h> 22#include <syslib/ppc83xx_setup.h>
24#include <asm/ppcboot.h> 23#include <asm/ppcboot.h>
25 24
diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c
index 7dc8a68acfd0..7e952c1228cb 100644
--- a/arch/ppc/platforms/85xx/mpc8540_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8540_ads.c
@@ -52,6 +52,10 @@
52 52
53#include <syslib/ppc85xx_setup.h> 53#include <syslib/ppc85xx_setup.h>
54 54
55static const char *GFAR_PHY_0 = "phy0:0";
56static const char *GFAR_PHY_1 = "phy0:1";
57static const char *GFAR_PHY_3 = "phy0:3";
58
55/* ************************************************************************ 59/* ************************************************************************
56 * 60 *
57 * Setup the architecture 61 * Setup the architecture
@@ -63,6 +67,7 @@ mpc8540ads_setup_arch(void)
63 bd_t *binfo = (bd_t *) __res; 67 bd_t *binfo = (bd_t *) __res;
64 unsigned int freq; 68 unsigned int freq;
65 struct gianfar_platform_data *pdata; 69 struct gianfar_platform_data *pdata;
70 struct gianfar_mdio_data *mdata;
66 71
67 /* get the core frequency */ 72 /* get the core frequency */
68 freq = binfo->bi_intfreq; 73 freq = binfo->bi_intfreq;
@@ -89,34 +94,35 @@ mpc8540ads_setup_arch(void)
89 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 94 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
90#endif 95#endif
91 96
97 /* setup the board related info for the MDIO bus */
98 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
99
100 mdata->irq[0] = MPC85xx_IRQ_EXT5;
101 mdata->irq[1] = MPC85xx_IRQ_EXT5;
102 mdata->irq[2] = -1;
103 mdata->irq[3] = MPC85xx_IRQ_EXT5;
104 mdata->irq[31] = -1;
105 mdata->paddr += binfo->bi_immr_base;
106
92 /* setup the board related information for the enet controllers */ 107 /* setup the board related information for the enet controllers */
93 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 108 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
94 if (pdata) { 109 if (pdata) {
95 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 110 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
96 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 111 pdata->bus_id = GFAR_PHY_0;
97 pdata->phyid = 0;
98 /* fixup phy address */
99 pdata->phy_reg_addr += binfo->bi_immr_base;
100 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 112 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
101 } 113 }
102 114
103 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 115 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
104 if (pdata) { 116 if (pdata) {
105 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 117 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
106 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 118 pdata->bus_id = GFAR_PHY_1;
107 pdata->phyid = 1;
108 /* fixup phy address */
109 pdata->phy_reg_addr += binfo->bi_immr_base;
110 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 119 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
111 } 120 }
112 121
113 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC); 122 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
114 if (pdata) { 123 if (pdata) {
115 pdata->board_flags = 0; 124 pdata->board_flags = 0;
116 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 125 pdata->bus_id = GFAR_PHY_3;
117 pdata->phyid = 3;
118 /* fixup phy address */
119 pdata->phy_reg_addr += binfo->bi_immr_base;
120 memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6); 126 memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
121 } 127 }
122 128
diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c
index 8841fd7da6ee..208433f1e93a 100644
--- a/arch/ppc/platforms/85xx/mpc8560_ads.c
+++ b/arch/ppc/platforms/85xx/mpc8560_ads.c
@@ -56,6 +56,10 @@
56#include <syslib/ppc85xx_setup.h> 56#include <syslib/ppc85xx_setup.h>
57 57
58 58
59static const char *GFAR_PHY_0 = "phy0:0";
60static const char *GFAR_PHY_1 = "phy0:1";
61static const char *GFAR_PHY_3 = "phy0:3";
62
59/* ************************************************************************ 63/* ************************************************************************
60 * 64 *
61 * Setup the architecture 65 * Setup the architecture
@@ -68,6 +72,7 @@ mpc8560ads_setup_arch(void)
68 bd_t *binfo = (bd_t *) __res; 72 bd_t *binfo = (bd_t *) __res;
69 unsigned int freq; 73 unsigned int freq;
70 struct gianfar_platform_data *pdata; 74 struct gianfar_platform_data *pdata;
75 struct gianfar_mdio_data *mdata;
71 76
72 cpm2_reset(); 77 cpm2_reset();
73 78
@@ -86,24 +91,28 @@ mpc8560ads_setup_arch(void)
86 mpc85xx_setup_hose(); 91 mpc85xx_setup_hose();
87#endif 92#endif
88 93
94 /* setup the board related info for the MDIO bus */
95 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
96
97 mdata->irq[0] = MPC85xx_IRQ_EXT5;
98 mdata->irq[1] = MPC85xx_IRQ_EXT5;
99 mdata->irq[2] = -1;
100 mdata->irq[3] = MPC85xx_IRQ_EXT5;
101 mdata->irq[31] = -1;
102 mdata->paddr += binfo->bi_immr_base;
103
89 /* setup the board related information for the enet controllers */ 104 /* setup the board related information for the enet controllers */
90 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 105 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
91 if (pdata) { 106 if (pdata) {
92 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 107 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
93 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 108 pdata->bus_id = GFAR_PHY_0;
94 pdata->phyid = 0;
95 /* fixup phy address */
96 pdata->phy_reg_addr += binfo->bi_immr_base;
97 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 109 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
98 } 110 }
99 111
100 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 112 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
101 if (pdata) { 113 if (pdata) {
102 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 114 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
103 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 115 pdata->bus_id = GFAR_PHY_1;
104 pdata->phyid = 1;
105 /* fixup phy address */
106 pdata->phy_reg_addr += binfo->bi_immr_base;
107 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 116 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
108 } 117 }
109 118
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
index 3875e839cff7..84acf6e8d45e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h
@@ -19,7 +19,6 @@
19 19
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/seq_file.h>
23#include <asm/ppcboot.h> 22#include <asm/ppcboot.h>
24 23
25#define BOARD_CCSRBAR ((uint)0xe0000000) 24#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
index 9f9039498ae5..a21156967a5e 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c
@@ -173,10 +173,7 @@ mpc85xx_cds_init_IRQ(void)
173#ifdef CONFIG_PCI 173#ifdef CONFIG_PCI
174 openpic_hookup_cascade(PIRQ0A, "82c59 cascade", i8259_irq); 174 openpic_hookup_cascade(PIRQ0A, "82c59 cascade", i8259_irq);
175 175
176 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 176 i8259_init(0, 0);
177 irq_desc[i].handler = &i8259_pic;
178
179 i8259_init(0);
180#endif 177#endif
181 178
182#ifdef CONFIG_CPM2 179#ifdef CONFIG_CPM2
@@ -394,6 +391,9 @@ mpc85xx_cds_pcibios_fixup(void)
394 391
395TODC_ALLOC(); 392TODC_ALLOC();
396 393
394static const char *GFAR_PHY_0 = "phy0:0";
395static const char *GFAR_PHY_1 = "phy0:1";
396
397/* ************************************************************************ 397/* ************************************************************************
398 * 398 *
399 * Setup the architecture 399 * Setup the architecture
@@ -405,6 +405,7 @@ mpc85xx_cds_setup_arch(void)
405 bd_t *binfo = (bd_t *) __res; 405 bd_t *binfo = (bd_t *) __res;
406 unsigned int freq; 406 unsigned int freq;
407 struct gianfar_platform_data *pdata; 407 struct gianfar_platform_data *pdata;
408 struct gianfar_mdio_data *mdata;
408 409
409 /* get the core frequency */ 410 /* get the core frequency */
410 freq = binfo->bi_intfreq; 411 freq = binfo->bi_intfreq;
@@ -448,44 +449,42 @@ mpc85xx_cds_setup_arch(void)
448 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 449 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
449#endif 450#endif
450 451
452 /* setup the board related info for the MDIO bus */
453 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
454
455 mdata->irq[0] = MPC85xx_IRQ_EXT5;
456 mdata->irq[1] = MPC85xx_IRQ_EXT5;
457 mdata->irq[2] = -1;
458 mdata->irq[3] = -1;
459 mdata->irq[31] = -1;
460 mdata->paddr += binfo->bi_immr_base;
461
451 /* setup the board related information for the enet controllers */ 462 /* setup the board related information for the enet controllers */
452 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 463 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
453 if (pdata) { 464 if (pdata) {
454 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 465 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
455 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 466 pdata->bus_id = GFAR_PHY_0;
456 pdata->phyid = 0;
457 /* fixup phy address */
458 pdata->phy_reg_addr += binfo->bi_immr_base;
459 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 467 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
460 } 468 }
461 469
462 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 470 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
463 if (pdata) { 471 if (pdata) {
464 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 472 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
465 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 473 pdata->bus_id = GFAR_PHY_1;
466 pdata->phyid = 1;
467 /* fixup phy address */
468 pdata->phy_reg_addr += binfo->bi_immr_base;
469 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 474 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
470 } 475 }
471 476
472 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1); 477 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1);
473 if (pdata) { 478 if (pdata) {
474 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 479 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
475 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 480 pdata->bus_id = GFAR_PHY_0;
476 pdata->phyid = 0;
477 /* fixup phy address */
478 pdata->phy_reg_addr += binfo->bi_immr_base;
479 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 481 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
480 } 482 }
481 483
482 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2); 484 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2);
483 if (pdata) { 485 if (pdata) {
484 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 486 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
485 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 487 pdata->bus_id = GFAR_PHY_1;
486 pdata->phyid = 1;
487 /* fixup phy address */
488 pdata->phy_reg_addr += binfo->bi_immr_base;
489 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 488 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
490 } 489 }
491 490
diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c
index c76760a781c1..b4ee1707a836 100644
--- a/arch/ppc/platforms/85xx/sbc8560.c
+++ b/arch/ppc/platforms/85xx/sbc8560.c
@@ -91,6 +91,9 @@ sbc8560_early_serial_map(void)
91} 91}
92#endif 92#endif
93 93
94static const char *GFAR_PHY_25 = "phy0:25";
95static const char *GFAR_PHY_26 = "phy0:26";
96
94/* ************************************************************************ 97/* ************************************************************************
95 * 98 *
96 * Setup the architecture 99 * Setup the architecture
@@ -102,6 +105,7 @@ sbc8560_setup_arch(void)
102 bd_t *binfo = (bd_t *) __res; 105 bd_t *binfo = (bd_t *) __res;
103 unsigned int freq; 106 unsigned int freq;
104 struct gianfar_platform_data *pdata; 107 struct gianfar_platform_data *pdata;
108 struct gianfar_mdio_data *mdata;
105 109
106 /* get the core frequency */ 110 /* get the core frequency */
107 freq = binfo->bi_intfreq; 111 freq = binfo->bi_intfreq;
@@ -126,24 +130,26 @@ sbc8560_setup_arch(void)
126 invalidate_tlbcam_entry(num_tlbcam_entries - 1); 130 invalidate_tlbcam_entry(num_tlbcam_entries - 1);
127#endif 131#endif
128 132
133 /* setup the board related info for the MDIO bus */
134 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
135
136 mdata->irq[25] = MPC85xx_IRQ_EXT6;
137 mdata->irq[26] = MPC85xx_IRQ_EXT7;
138 mdata->irq[31] = -1;
139 mdata->paddr += binfo->bi_immr_base;
140
129 /* setup the board related information for the enet controllers */ 141 /* setup the board related information for the enet controllers */
130 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 142 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
131 if (pdata) { 143 if (pdata) {
132 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 144 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
133 pdata->interruptPHY = MPC85xx_IRQ_EXT6; 145 pdata->bus_id = GFAR_PHY_25;
134 pdata->phyid = 25;
135 /* fixup phy address */
136 pdata->phy_reg_addr += binfo->bi_immr_base;
137 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 146 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
138 } 147 }
139 148
140 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 149 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
141 if (pdata) { 150 if (pdata) {
142 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; 151 pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
143 pdata->interruptPHY = MPC85xx_IRQ_EXT7; 152 pdata->bus_id = GFAR_PHY_26;
144 pdata->phyid = 26;
145 /* fixup phy address */
146 pdata->phy_reg_addr += binfo->bi_immr_base;
147 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 153 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
148 } 154 }
149 155
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 20940f4044f4..1e1b85f8193a 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -91,6 +91,9 @@ static u8 gp3_openpic_initsenses[] __initdata = {
91 0x0, /* External 11: */ 91 0x0, /* External 11: */
92}; 92};
93 93
94static const char *GFAR_PHY_2 = "phy0:2";
95static const char *GFAR_PHY_4 = "phy0:4";
96
94/* 97/*
95 * Setup the architecture 98 * Setup the architecture
96 */ 99 */
@@ -100,6 +103,7 @@ gp3_setup_arch(void)
100 bd_t *binfo = (bd_t *) __res; 103 bd_t *binfo = (bd_t *) __res;
101 unsigned int freq; 104 unsigned int freq;
102 struct gianfar_platform_data *pdata; 105 struct gianfar_platform_data *pdata;
106 struct gianfar_mdio_data *mdata;
103 107
104 cpm2_reset(); 108 cpm2_reset();
105 109
@@ -118,23 +122,26 @@ gp3_setup_arch(void)
118 mpc85xx_setup_hose(); 122 mpc85xx_setup_hose();
119#endif 123#endif
120 124
125 /* setup the board related info for the MDIO bus */
126 mdata = (struct gianfar_mdio_data *) ppc_sys_get_pdata(MPC85xx_MDIO);
127
128 mdata->irq[2] = MPC85xx_IRQ_EXT5;
129 mdata->irq[4] = MPC85xx_IRQ_EXT5;
130 mdata->irq[31] = -1;
131 mdata->paddr += binfo->bi_immr_base;
132
121 /* setup the board related information for the enet controllers */ 133 /* setup the board related information for the enet controllers */
122 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1); 134 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
123 if (pdata) { 135 if (pdata) {
124 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */ 136 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
125 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 137 pdata->bus_id = GFAR_PHY_2;
126 pdata->phyid = 2;
127 pdata->phy_reg_addr += binfo->bi_immr_base;
128 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6); 138 memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
129 } 139 }
130 140
131 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2); 141 pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
132 if (pdata) { 142 if (pdata) {
133 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */ 143 /* pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
134 pdata->interruptPHY = MPC85xx_IRQ_EXT5; 144 pdata->bus_id = GFAR_PHY_4;
135 pdata->phyid = 4;
136 /* fixup phy address */
137 pdata->phy_reg_addr += binfo->bi_immr_base;
138 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6); 145 memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
139 } 146 }
140 147
diff --git a/arch/ppc/platforms/85xx/stx_gp3.h b/arch/ppc/platforms/85xx/stx_gp3.h
index 7bcc6c35a417..95fdf4b0680b 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.h
+++ b/arch/ppc/platforms/85xx/stx_gp3.h
@@ -21,7 +21,6 @@
21 21
22#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h>
25#include <asm/ppcboot.h> 24#include <asm/ppcboot.h>
26 25
27#define BOARD_CCSRBAR ((uint)0xe0000000) 26#define BOARD_CCSRBAR ((uint)0xe0000000)
diff --git a/arch/ppc/platforms/Makefile b/arch/ppc/platforms/Makefile
index ff7452e5d8e5..7c5cdabf6f3c 100644
--- a/arch/ppc/platforms/Makefile
+++ b/arch/ppc/platforms/Makefile
@@ -14,6 +14,9 @@ obj-$(CONFIG_PPC_PMAC) += pmac_pic.o pmac_setup.o pmac_time.o \
14 pmac_low_i2c.o pmac_cache.o 14 pmac_low_i2c.o pmac_cache.o
15obj-$(CONFIG_PPC_CHRP) += chrp_setup.o chrp_time.o chrp_pci.o \ 15obj-$(CONFIG_PPC_CHRP) += chrp_setup.o chrp_time.o chrp_pci.o \
16 chrp_pegasos_eth.o 16 chrp_pegasos_eth.o
17ifeq ($(CONFIG_PPC_CHRP),y)
18obj-$(CONFIG_NVRAM) += chrp_nvram.o
19endif
17obj-$(CONFIG_PPC_PREP) += prep_pci.o prep_setup.o 20obj-$(CONFIG_PPC_PREP) += prep_pci.o prep_setup.o
18ifeq ($(CONFIG_PPC_PMAC),y) 21ifeq ($(CONFIG_PPC_PMAC),y)
19obj-$(CONFIG_NVRAM) += pmac_nvram.o 22obj-$(CONFIG_NVRAM) += pmac_nvram.o
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index df6ff98c023a..48a4a510d598 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -541,7 +541,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
541 541
542 ppc_md.setup_arch = chestnut_setup_arch; 542 ppc_md.setup_arch = chestnut_setup_arch;
543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo; 543 ppc_md.show_cpuinfo = chestnut_show_cpuinfo;
544 ppc_md.irq_canonicalize = NULL;
545 ppc_md.init_IRQ = mv64360_init_irq; 544 ppc_md.init_IRQ = mv64360_init_irq;
546 ppc_md.get_irq = mv64360_get_irq; 545 ppc_md.get_irq = mv64360_get_irq;
547 ppc_md.init = NULL; 546 ppc_md.init = NULL;
diff --git a/arch/ppc/platforms/chrp_nvram.c b/arch/ppc/platforms/chrp_nvram.c
new file mode 100644
index 000000000000..465ba9b090ef
--- /dev/null
+++ b/arch/ppc/platforms/chrp_nvram.c
@@ -0,0 +1,83 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * /dev/nvram driver for PPC
10 *
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <asm/uaccess.h>
18#include <asm/prom.h>
19#include <asm/machdep.h>
20
21static unsigned int nvram_size;
22static unsigned char nvram_buf[4];
23static DEFINE_SPINLOCK(nvram_lock);
24
25static unsigned char chrp_nvram_read(int addr)
26{
27 unsigned long done, flags;
28 unsigned char ret;
29
30 if (addr >= nvram_size) {
31 printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
32 current->comm, addr, nvram_size);
33 return 0xff;
34 }
35 spin_lock_irqsave(&nvram_lock, flags);
36 if ((call_rtas("nvram-fetch", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
37 ret = 0xff;
38 else
39 ret = nvram_buf[0];
40 spin_unlock_irqrestore(&nvram_lock, flags);
41
42 return ret;
43}
44
45static void chrp_nvram_write(int addr, unsigned char val)
46{
47 unsigned long done, flags;
48
49 if (addr >= nvram_size) {
50 printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
51 current->comm, addr, nvram_size);
52 return;
53 }
54 spin_lock_irqsave(&nvram_lock, flags);
55 nvram_buf[0] = val;
56 if ((call_rtas("nvram-store", 3, 2, &done, addr, __pa(nvram_buf), 1) != 0) || 1 != done)
57 printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
58 spin_unlock_irqrestore(&nvram_lock, flags);
59}
60
61void __init chrp_nvram_init(void)
62{
63 struct device_node *nvram;
64 unsigned int *nbytes_p, proplen;
65
66 nvram = of_find_node_by_type(NULL, "nvram");
67 if (nvram == NULL)
68 return;
69
70 nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen);
71 if (nbytes_p == NULL || proplen != sizeof(unsigned int))
72 return;
73
74 nvram_size = *nbytes_p;
75
76 printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
77 of_node_put(nvram);
78
79 ppc_md.nvram_read_val = chrp_nvram_read;
80 ppc_md.nvram_write_val = chrp_nvram_write;
81
82 return;
83}
diff --git a/arch/ppc/platforms/chrp_pci.c b/arch/ppc/platforms/chrp_pci.c
index 7d3fbb5c5db2..bd047aac01b1 100644
--- a/arch/ppc/platforms/chrp_pci.c
+++ b/arch/ppc/platforms/chrp_pci.c
@@ -29,7 +29,7 @@ void __iomem *gg2_pci_config_base;
29 * limit the bus number to 3 bits 29 * limit the bus number to 3 bits
30 */ 30 */
31 31
32int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off, 32int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
33 int len, u32 *val) 33 int len, u32 *val)
34{ 34{
35 volatile void __iomem *cfg_data; 35 volatile void __iomem *cfg_data;
@@ -56,7 +56,7 @@ int __chrp gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
56 return PCIBIOS_SUCCESSFUL; 56 return PCIBIOS_SUCCESSFUL;
57} 57}
58 58
59int __chrp gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off, 59int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
60 int len, u32 val) 60 int len, u32 val)
61{ 61{
62 volatile void __iomem *cfg_data; 62 volatile void __iomem *cfg_data;
@@ -92,7 +92,7 @@ static struct pci_ops gg2_pci_ops =
92/* 92/*
93 * Access functions for PCI config space using RTAS calls. 93 * Access functions for PCI config space using RTAS calls.
94 */ 94 */
95int __chrp 95int
96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 96rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
97 int len, u32 *val) 97 int len, u32 *val)
98{ 98{
@@ -108,7 +108,7 @@ rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL; 108 return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
109} 109}
110 110
111int __chrp 111int
112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 112rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
113 int len, u32 val) 113 int len, u32 val)
114{ 114{
@@ -203,7 +203,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
203 printk ("RTAS supporting Pegasos OF not found, please upgrade" 203 printk ("RTAS supporting Pegasos OF not found, please upgrade"
204 " your firmware\n"); 204 " your firmware\n");
205 } 205 }
206 pci_assign_all_busses = 1; 206 pci_assign_all_buses = 1;
207} 207}
208 208
209void __init 209void __init
diff --git a/arch/ppc/platforms/chrp_pegasos_eth.c b/arch/ppc/platforms/chrp_pegasos_eth.c
index cad5bfa153b2..a9052305c35d 100644
--- a/arch/ppc/platforms/chrp_pegasos_eth.c
+++ b/arch/ppc/platforms/chrp_pegasos_eth.c
@@ -17,7 +17,20 @@
17#include <linux/mv643xx.h> 17#include <linux/mv643xx.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19 19
20/* Pegasos 2 specific Marvell MV 64361 gigabit ethernet port setup */ 20#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
21#define PEGASOS2_MARVELL_REGSIZE (0x00004000)
22#define PEGASOS2_SRAM_BASE (0xf2000000)
23#define PEGASOS2_SRAM_SIZE (256*1024)
24
25#define PEGASOS2_SRAM_BASE_ETH0 (PEGASOS2_SRAM_BASE)
26#define PEGASOS2_SRAM_BASE_ETH1 (PEGASOS2_SRAM_BASE_ETH0 + (PEGASOS2_SRAM_SIZE / 2) )
27
28
29#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
30#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
31
32#undef BE_VERBOSE
33
21static struct resource mv643xx_eth_shared_resources[] = { 34static struct resource mv643xx_eth_shared_resources[] = {
22 [0] = { 35 [0] = {
23 .name = "ethernet shared base", 36 .name = "ethernet shared base",
@@ -44,7 +57,16 @@ static struct resource mv643xx_eth0_resources[] = {
44 }, 57 },
45}; 58};
46 59
47static struct mv643xx_eth_platform_data eth0_pd; 60
61static struct mv643xx_eth_platform_data eth0_pd = {
62 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH0,
63 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
64 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
65
66 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH0 + PEGASOS2_SRAM_TXRING_SIZE,
67 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
68 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
69};
48 70
49static struct platform_device eth0_device = { 71static struct platform_device eth0_device = {
50 .name = MV643XX_ETH_NAME, 72 .name = MV643XX_ETH_NAME,
@@ -65,7 +87,15 @@ static struct resource mv643xx_eth1_resources[] = {
65 }, 87 },
66}; 88};
67 89
68static struct mv643xx_eth_platform_data eth1_pd; 90static struct mv643xx_eth_platform_data eth1_pd = {
91 .tx_sram_addr = PEGASOS2_SRAM_BASE_ETH1,
92 .tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
93 .tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
94
95 .rx_sram_addr = PEGASOS2_SRAM_BASE_ETH1 + PEGASOS2_SRAM_TXRING_SIZE,
96 .rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
97 .rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
98};
69 99
70static struct platform_device eth1_device = { 100static struct platform_device eth1_device = {
71 .name = MV643XX_ETH_NAME, 101 .name = MV643XX_ETH_NAME,
@@ -83,9 +113,62 @@ static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
83 &eth1_device, 113 &eth1_device,
84}; 114};
85 115
116/***********/
117/***********/
118#define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); }
119#define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset)
120
121static void __iomem *mv643xx_reg_base;
122
123static int Enable_SRAM(void)
124{
125 u32 ALong;
126
127 if (mv643xx_reg_base == NULL)
128 mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE,
129 PEGASOS2_MARVELL_REGSIZE);
130
131 if (mv643xx_reg_base == NULL)
132 return -ENOMEM;
133
134#ifdef BE_VERBOSE
135 printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n",
136 (void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base);
137#endif
138
139 MV_WRITE(MV64340_SRAM_CONFIG, 0);
86 140
87int 141 MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16);
88mv643xx_eth_add_pds(void) 142
143 MV_READ(MV64340_BASE_ADDR_ENABLE, ALong);
144 ALong &= ~(1 << 19);
145 MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong);
146
147 ALong = 0x02;
148 ALong |= PEGASOS2_SRAM_BASE & 0xffff0000;
149 MV_WRITE(MV643XX_ETH_BAR_4, ALong);
150
151 MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000);
152
153 MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
154 ALong &= ~(1 << 4);
155 MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
156
157#ifdef BE_VERBOSE
158 printk("Pegasos II/Marvell MV64361: register unmapped\n");
159 printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE);
160#endif
161
162 iounmap(mv643xx_reg_base);
163 mv643xx_reg_base = NULL;
164
165 return 1;
166}
167
168
169/***********/
170/***********/
171int mv643xx_eth_add_pds(void)
89{ 172{
90 int ret = 0; 173 int ret = 0;
91 static struct pci_device_id pci_marvell_mv64360[] = { 174 static struct pci_device_id pci_marvell_mv64360[] = {
@@ -93,9 +176,38 @@ mv643xx_eth_add_pds(void)
93 { } 176 { }
94 }; 177 };
95 178
179#ifdef BE_VERBOSE
180 printk("Pegasos II/Marvell MV64361: init\n");
181#endif
182
96 if (pci_dev_present(pci_marvell_mv64360)) { 183 if (pci_dev_present(pci_marvell_mv64360)) {
97 ret = platform_add_devices(mv643xx_eth_pd_devs, ARRAY_SIZE(mv643xx_eth_pd_devs)); 184 ret = platform_add_devices(mv643xx_eth_pd_devs,
185 ARRAY_SIZE(mv643xx_eth_pd_devs));
186
187 if ( Enable_SRAM() < 0)
188 {
189 eth0_pd.tx_sram_addr = 0;
190 eth0_pd.tx_sram_size = 0;
191 eth0_pd.rx_sram_addr = 0;
192 eth0_pd.rx_sram_size = 0;
193
194 eth1_pd.tx_sram_addr = 0;
195 eth1_pd.tx_sram_size = 0;
196 eth1_pd.rx_sram_addr = 0;
197 eth1_pd.rx_sram_size = 0;
198
199#ifdef BE_VERBOSE
200 printk("Pegasos II/Marvell MV64361: Can't enable the "
201 "SRAM\n");
202#endif
203 }
98 } 204 }
205
206#ifdef BE_VERBOSE
207 printk("Pegasos II/Marvell MV64361: init is over\n");
208#endif
209
99 return ret; 210 return ret;
100} 211}
212
101device_initcall(mv643xx_eth_add_pds); 213device_initcall(mv643xx_eth_add_pds);
diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c
index 66346f0de7ec..f1b70ab3c6fd 100644
--- a/arch/ppc/platforms/chrp_setup.c
+++ b/arch/ppc/platforms/chrp_setup.c
@@ -104,7 +104,7 @@ static const char *gg2_cachemodes[4] = {
104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode" 104 "Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
105}; 105};
106 106
107int __chrp 107int
108chrp_show_cpuinfo(struct seq_file *m) 108chrp_show_cpuinfo(struct seq_file *m)
109{ 109{
110 int i, sdramen; 110 int i, sdramen;
@@ -302,7 +302,7 @@ void __init chrp_setup_arch(void)
302 pci_create_OF_bus_map(); 302 pci_create_OF_bus_map();
303} 303}
304 304
305void __chrp 305void
306chrp_event_scan(void) 306chrp_event_scan(void)
307{ 307{
308 unsigned char log[1024]; 308 unsigned char log[1024];
@@ -313,7 +313,7 @@ chrp_event_scan(void)
313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset; 313 ppc_md.heartbeat_count = ppc_md.heartbeat_reset;
314} 314}
315 315
316void __chrp 316void
317chrp_restart(char *cmd) 317chrp_restart(char *cmd)
318{ 318{
319 printk("RTAS system-reboot returned %d\n", 319 printk("RTAS system-reboot returned %d\n",
@@ -321,7 +321,7 @@ chrp_restart(char *cmd)
321 for (;;); 321 for (;;);
322} 322}
323 323
324void __chrp 324void
325chrp_power_off(void) 325chrp_power_off(void)
326{ 326{
327 /* allow power on only with power button press */ 327 /* allow power on only with power button press */
@@ -330,20 +330,12 @@ chrp_power_off(void)
330 for (;;); 330 for (;;);
331} 331}
332 332
333void __chrp 333void
334chrp_halt(void) 334chrp_halt(void)
335{ 335{
336 chrp_power_off(); 336 chrp_power_off();
337} 337}
338 338
339u_int __chrp
340chrp_irq_canonicalize(u_int irq)
341{
342 if (irq == 2)
343 return 9;
344 return irq;
345}
346
347/* 339/*
348 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property. 340 * Finds the open-pic node and sets OpenPIC_Addr based on its reg property.
349 * Then checks if it has an interrupt-ranges property. If it does then 341 * Then checks if it has an interrupt-ranges property. If it does then
@@ -444,9 +436,7 @@ void __init chrp_init_IRQ(void)
444 i8259_irq); 436 i8259_irq);
445 437
446 } 438 }
447 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 439 i8259_init(chrp_int_ack, 0);
448 irq_desc[i].handler = &i8259_pic;
449 i8259_init(chrp_int_ack);
450 440
451#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON) 441#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(XMON)
452 /* see if there is a keyboard in the device tree 442 /* see if there is a keyboard in the device tree
@@ -464,8 +454,7 @@ void __init
464chrp_init2(void) 454chrp_init2(void)
465{ 455{
466#ifdef CONFIG_NVRAM 456#ifdef CONFIG_NVRAM
467// XX replace this in a more saner way 457 chrp_nvram_init();
468// pmac_nvram_init();
469#endif 458#endif
470 459
471 request_region(0x20,0x20,"pic1"); 460 request_region(0x20,0x20,"pic1");
@@ -499,6 +488,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
499 DMA_MODE_READ = 0x44; 488 DMA_MODE_READ = 0x44;
500 DMA_MODE_WRITE = 0x48; 489 DMA_MODE_WRITE = 0x48;
501 isa_io_base = CHRP_ISA_IO_BASE; /* default value */ 490 isa_io_base = CHRP_ISA_IO_BASE; /* default value */
491 ppc_do_canonicalize_irqs = 1;
502 492
503 if (root) 493 if (root)
504 machine = get_property(root, "model", NULL); 494 machine = get_property(root, "model", NULL);
@@ -517,7 +507,6 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
517 ppc_md.show_percpuinfo = of_show_percpuinfo; 507 ppc_md.show_percpuinfo = of_show_percpuinfo;
518 ppc_md.show_cpuinfo = chrp_show_cpuinfo; 508 ppc_md.show_cpuinfo = chrp_show_cpuinfo;
519 509
520 ppc_md.irq_canonicalize = chrp_irq_canonicalize;
521 ppc_md.init_IRQ = chrp_init_IRQ; 510 ppc_md.init_IRQ = chrp_init_IRQ;
522 if (_chrp_type == _CHRP_Pegasos) 511 if (_chrp_type == _CHRP_Pegasos)
523 ppc_md.get_irq = i8259_irq; 512 ppc_md.get_irq = i8259_irq;
@@ -561,7 +550,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
561#endif 550#endif
562 551
563#ifdef CONFIG_SMP 552#ifdef CONFIG_SMP
564 ppc_md.smp_ops = &chrp_smp_ops; 553 smp_ops = &chrp_smp_ops;
565#endif /* CONFIG_SMP */ 554#endif /* CONFIG_SMP */
566 555
567 /* 556 /*
@@ -571,7 +560,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
571 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0); 560 if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
572} 561}
573 562
574void __chrp 563void
575rtas_display_progress(char *s, unsigned short hex) 564rtas_display_progress(char *s, unsigned short hex)
576{ 565{
577 int width; 566 int width;
@@ -598,7 +587,7 @@ rtas_display_progress(char *s, unsigned short hex)
598 call_rtas( "display-character", 1, 1, NULL, ' ' ); 587 call_rtas( "display-character", 1, 1, NULL, ' ' );
599} 588}
600 589
601void __chrp 590void
602rtas_indicator_progress(char *s, unsigned short hex) 591rtas_indicator_progress(char *s, unsigned short hex)
603{ 592{
604 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex); 593 call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
diff --git a/arch/ppc/platforms/chrp_smp.c b/arch/ppc/platforms/chrp_smp.c
index 0ea1f7d9e46a..97e539557ecb 100644
--- a/arch/ppc/platforms/chrp_smp.c
+++ b/arch/ppc/platforms/chrp_smp.c
@@ -31,6 +31,7 @@
31#include <asm/residual.h> 31#include <asm/residual.h>
32#include <asm/time.h> 32#include <asm/time.h>
33#include <asm/open_pic.h> 33#include <asm/open_pic.h>
34#include <asm/machdep.h>
34 35
35extern unsigned long smp_chrp_cpu_nr; 36extern unsigned long smp_chrp_cpu_nr;
36 37
@@ -88,7 +89,7 @@ smp_chrp_take_timebase(void)
88} 89}
89 90
90/* CHRP with openpic */ 91/* CHRP with openpic */
91struct smp_ops_t chrp_smp_ops __chrpdata = { 92struct smp_ops_t chrp_smp_ops = {
92 .message_pass = smp_openpic_message_pass, 93 .message_pass = smp_openpic_message_pass,
93 .probe = smp_chrp_probe, 94 .probe = smp_chrp_probe,
94 .kick_cpu = smp_chrp_kick_cpu, 95 .kick_cpu = smp_chrp_kick_cpu,
diff --git a/arch/ppc/platforms/chrp_time.c b/arch/ppc/platforms/chrp_time.c
index 6037ce7796f5..29d074c305f0 100644
--- a/arch/ppc/platforms/chrp_time.c
+++ b/arch/ppc/platforms/chrp_time.c
@@ -52,7 +52,7 @@ long __init chrp_time_init(void)
52 return 0; 52 return 0;
53} 53}
54 54
55int __chrp chrp_cmos_clock_read(int addr) 55int chrp_cmos_clock_read(int addr)
56{ 56{
57 if (nvram_as1 != 0) 57 if (nvram_as1 != 0)
58 outb(addr>>8, nvram_as1); 58 outb(addr>>8, nvram_as1);
@@ -60,7 +60,7 @@ int __chrp chrp_cmos_clock_read(int addr)
60 return (inb(nvram_data)); 60 return (inb(nvram_data));
61} 61}
62 62
63void __chrp chrp_cmos_clock_write(unsigned long val, int addr) 63void chrp_cmos_clock_write(unsigned long val, int addr)
64{ 64{
65 if (nvram_as1 != 0) 65 if (nvram_as1 != 0)
66 outb(addr>>8, nvram_as1); 66 outb(addr>>8, nvram_as1);
@@ -72,7 +72,7 @@ void __chrp chrp_cmos_clock_write(unsigned long val, int addr)
72/* 72/*
73 * Set the hardware clock. -- Cort 73 * Set the hardware clock. -- Cort
74 */ 74 */
75int __chrp chrp_set_rtc_time(unsigned long nowtime) 75int chrp_set_rtc_time(unsigned long nowtime)
76{ 76{
77 unsigned char save_control, save_freq_select; 77 unsigned char save_control, save_freq_select;
78 struct rtc_time tm; 78 struct rtc_time tm;
@@ -118,7 +118,7 @@ int __chrp chrp_set_rtc_time(unsigned long nowtime)
118 return 0; 118 return 0;
119} 119}
120 120
121unsigned long __chrp chrp_get_rtc_time(void) 121unsigned long chrp_get_rtc_time(void)
122{ 122{
123 unsigned int year, mon, day, hour, min, sec; 123 unsigned int year, mon, day, hour, min, sec;
124 int uip, i; 124 int uip, i;
diff --git a/arch/ppc/platforms/ev64360.c b/arch/ppc/platforms/ev64360.c
index 9811a8a52c25..53388a1c334f 100644
--- a/arch/ppc/platforms/ev64360.c
+++ b/arch/ppc/platforms/ev64360.c
@@ -35,6 +35,7 @@
35#include <asm/bootinfo.h> 35#include <asm/bootinfo.h>
36#include <asm/ppcboot.h> 36#include <asm/ppcboot.h>
37#include <asm/mv64x60.h> 37#include <asm/mv64x60.h>
38#include <asm/machdep.h>
38#include <platforms/ev64360.h> 39#include <platforms/ev64360.h>
39 40
40#define BOARD_VENDOR "Marvell" 41#define BOARD_VENDOR "Marvell"
diff --git a/arch/ppc/platforms/fads.h b/arch/ppc/platforms/fads.h
index b60c56450b67..a48fb8d723e4 100644
--- a/arch/ppc/platforms/fads.h
+++ b/arch/ppc/platforms/fads.h
@@ -25,6 +25,8 @@
25 25
26#if defined(CONFIG_MPC86XADS) 26#if defined(CONFIG_MPC86XADS)
27 27
28#define BOARD_CHIP_NAME "MPC86X"
29
28/* U-Boot maps BCSR to 0xff080000 */ 30/* U-Boot maps BCSR to 0xff080000 */
29#define BCSR_ADDR ((uint)0xff080000) 31#define BCSR_ADDR ((uint)0xff080000)
30 32
diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c
index 3a5ff9fb71d6..729897c59033 100644
--- a/arch/ppc/platforms/gemini_setup.c
+++ b/arch/ppc/platforms/gemini_setup.c
@@ -35,6 +35,7 @@
35#include <asm/time.h> 35#include <asm/time.h>
36#include <asm/open_pic.h> 36#include <asm/open_pic.h>
37#include <asm/bootinfo.h> 37#include <asm/bootinfo.h>
38#include <asm/machdep.h>
38 39
39void gemini_find_bridges(void); 40void gemini_find_bridges(void);
40static int gemini_get_clock_speed(void); 41static int gemini_get_clock_speed(void);
@@ -555,7 +556,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
555 556
556 ppc_md.setup_arch = gemini_setup_arch; 557 ppc_md.setup_arch = gemini_setup_arch;
557 ppc_md.show_cpuinfo = gemini_show_cpuinfo; 558 ppc_md.show_cpuinfo = gemini_show_cpuinfo;
558 ppc_md.irq_canonicalize = NULL;
559 ppc_md.init_IRQ = gemini_init_IRQ; 559 ppc_md.init_IRQ = gemini_init_IRQ;
560 ppc_md.get_irq = openpic_get_irq; 560 ppc_md.get_irq = openpic_get_irq;
561 ppc_md.init = NULL; 561 ppc_md.init = NULL;
@@ -575,6 +575,6 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup; 575 ppc_md.pcibios_fixup_bus = gemini_pcibios_fixup;
576 576
577#ifdef CONFIG_SMP 577#ifdef CONFIG_SMP
578 ppc_md.smp_ops = &gemini_smp_ops; 578 smp_ops = &gemini_smp_ops;
579#endif /* CONFIG_SMP */ 579#endif /* CONFIG_SMP */
580} 580}
diff --git a/arch/ppc/platforms/hdpu.c b/arch/ppc/platforms/hdpu.c
index ff3796860123..b6a66d5e9d83 100644
--- a/arch/ppc/platforms/hdpu.c
+++ b/arch/ppc/platforms/hdpu.c
@@ -609,11 +609,6 @@ static void parse_bootinfo(unsigned long r3,
609} 609}
610 610
611#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 611#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
612static int hdpu_ide_check_region(ide_ioreg_t from, unsigned int extent)
613{
614 return check_region(from, extent);
615}
616
617static void 612static void
618hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name) 613hdpu_ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
619{ 614{
@@ -753,7 +748,7 @@ static int smp_hdpu_probe(void)
753} 748}
754 749
755static void 750static void
756smp_hdpu_message_pass(int target, int msg, unsigned long data, int wait) 751smp_hdpu_message_pass(int target, int msg)
757{ 752{
758 if (msg > 0x3) { 753 if (msg > 0x3) {
759 printk("SMP %d: smp_message_pass: unknown msg %d\n", 754 printk("SMP %d: smp_message_pass: unknown msg %d\n",
@@ -949,7 +944,7 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
949#endif /* CONFIG_SERIAL_TEXT_DEBUG */ 944#endif /* CONFIG_SERIAL_TEXT_DEBUG */
950 945
951#ifdef CONFIG_SMP 946#ifdef CONFIG_SMP
952 ppc_md.smp_ops = &hdpu_smp_ops; 947 smp_ops = &hdpu_smp_ops;
953#endif /* CONFIG_SMP */ 948#endif /* CONFIG_SMP */
954 949
955#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH) 950#if defined(CONFIG_SERIAL_MPSC) || defined(CONFIG_MV643XX_ETH)
diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c
index 2b53afae0e9c..a301c5ac58dd 100644
--- a/arch/ppc/platforms/katana.c
+++ b/arch/ppc/platforms/katana.c
@@ -42,6 +42,7 @@
42#include <asm/ppcboot.h> 42#include <asm/ppcboot.h>
43#include <asm/mv64x60.h> 43#include <asm/mv64x60.h>
44#include <platforms/katana.h> 44#include <platforms/katana.h>
45#include <asm/machdep.h>
45 46
46static struct mv64x60_handle bh; 47static struct mv64x60_handle bh;
47static katana_id_t katana_id; 48static katana_id_t katana_id;
@@ -520,7 +521,7 @@ katana_fixup_resources(struct pci_dev *dev)
520{ 521{
521 u16 v16; 522 u16 v16;
522 523
523 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_LINE_SIZE>>2); 524 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES>>2);
524 525
525 pci_read_config_word(dev, PCI_COMMAND, &v16); 526 pci_read_config_word(dev, PCI_COMMAND, &v16);
526 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK; 527 v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK;
diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c
index b604cf8b3cae..d44cc991179f 100644
--- a/arch/ppc/platforms/lite5200.c
+++ b/arch/ppc/platforms/lite5200.c
@@ -35,6 +35,7 @@
35#include <asm/io.h> 35#include <asm/io.h>
36#include <asm/mpc52xx.h> 36#include <asm/mpc52xx.h>
37#include <asm/ppc_sys.h> 37#include <asm/ppc_sys.h>
38#include <asm/machdep.h>
38 39
39#include <syslib/mpc52xx_pci.h> 40#include <syslib/mpc52xx_pci.h>
40 41
diff --git a/arch/ppc/platforms/lopec.c b/arch/ppc/platforms/lopec.c
index a5569525e0af..06d247c23b82 100644
--- a/arch/ppc/platforms/lopec.c
+++ b/arch/ppc/platforms/lopec.c
@@ -144,15 +144,6 @@ lopec_show_cpuinfo(struct seq_file *m)
144 return 0; 144 return 0;
145} 145}
146 146
147static u32
148lopec_irq_canonicalize(u32 irq)
149{
150 if (irq == 2)
151 return 9;
152 else
153 return irq;
154}
155
156static void 147static void
157lopec_restart(char *cmd) 148lopec_restart(char *cmd)
158{ 149{
@@ -276,15 +267,11 @@ lopec_init_IRQ(void)
276 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 267 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
277 &i8259_irq); 268 &i8259_irq);
278 269
279 /* Map i8259 interrupts */
280 for(i = 0; i < NUM_8259_INTERRUPTS; i++)
281 irq_desc[i].handler = &i8259_pic;
282
283 /* 270 /*
284 * The EPIC allows for a read in the range of 0xFEF00000 -> 271 * The EPIC allows for a read in the range of 0xFEF00000 ->
285 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction. 272 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction.
286 */ 273 */
287 i8259_init(0xfef00000); 274 i8259_init(0xfef00000, 0);
288} 275}
289 276
290static int __init 277static int __init
@@ -379,10 +366,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
379 ISA_DMA_THRESHOLD = 0x00ffffff; 366 ISA_DMA_THRESHOLD = 0x00ffffff;
380 DMA_MODE_READ = 0x44; 367 DMA_MODE_READ = 0x44;
381 DMA_MODE_WRITE = 0x48; 368 DMA_MODE_WRITE = 0x48;
369 ppc_do_canonicalize_irqs = 1;
382 370
383 ppc_md.setup_arch = lopec_setup_arch; 371 ppc_md.setup_arch = lopec_setup_arch;
384 ppc_md.show_cpuinfo = lopec_show_cpuinfo; 372 ppc_md.show_cpuinfo = lopec_show_cpuinfo;
385 ppc_md.irq_canonicalize = lopec_irq_canonicalize;
386 ppc_md.init_IRQ = lopec_init_IRQ; 373 ppc_md.init_IRQ = lopec_init_IRQ;
387 ppc_md.get_irq = openpic_get_irq; 374 ppc_md.get_irq = openpic_get_irq;
388 375
diff --git a/arch/ppc/platforms/mpc885ads.h b/arch/ppc/platforms/mpc885ads.h
index eb386635b0fd..a80b7d116b49 100644
--- a/arch/ppc/platforms/mpc885ads.h
+++ b/arch/ppc/platforms/mpc885ads.h
@@ -88,5 +88,7 @@
88#define SICR_ENET_MASK ((uint)0x00ff0000) 88#define SICR_ENET_MASK ((uint)0x00ff0000)
89#define SICR_ENET_CLKRT ((uint)0x002c0000) 89#define SICR_ENET_CLKRT ((uint)0x002c0000)
90 90
91#define BOARD_CHIP_NAME "MPC885"
92
91#endif /* __ASM_MPC885ADS_H__ */ 93#endif /* __ASM_MPC885ADS_H__ */
92#endif /* __KERNEL__ */ 94#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c
index ce2ce88c8033..108eb182dddc 100644
--- a/arch/ppc/platforms/mvme5100.c
+++ b/arch/ppc/platforms/mvme5100.c
@@ -223,11 +223,7 @@ mvme5100_init_IRQ(void)
223 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 223 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
224 &i8259_irq); 224 &i8259_irq);
225 225
226 /* Map i8259 interrupts. */ 226 i8259_init(0, 0);
227 for (i = 0; i < NUM_8259_INTERRUPTS; i++)
228 irq_desc[i].handler = &i8259_pic;
229
230 i8259_init(0);
231#else 227#else
232 openpic_init(0); 228 openpic_init(0);
233#endif 229#endif
diff --git a/arch/ppc/platforms/pal4_setup.c b/arch/ppc/platforms/pal4_setup.c
index 12446b93e38c..f93a3f871932 100644
--- a/arch/ppc/platforms/pal4_setup.c
+++ b/arch/ppc/platforms/pal4_setup.c
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/todc.h> 29#include <asm/todc.h>
30#include <asm/bootinfo.h> 30#include <asm/bootinfo.h>
31#include <asm/machdep.h>
31 32
32#include <syslib/cpc700.h> 33#include <syslib/cpc700.h>
33 34
diff --git a/arch/ppc/platforms/pmac_backlight.c b/arch/ppc/platforms/pmac_backlight.c
index ed2b1cebc19a..8be2f7d071f0 100644
--- a/arch/ppc/platforms/pmac_backlight.c
+++ b/arch/ppc/platforms/pmac_backlight.c
@@ -37,7 +37,7 @@ static int backlight_req_enable = -1;
37static void backlight_callback(void *); 37static void backlight_callback(void *);
38static DECLARE_WORK(backlight_work, backlight_callback, NULL); 38static DECLARE_WORK(backlight_work, backlight_callback, NULL);
39 39
40void __pmac register_backlight_controller(struct backlight_controller *ctrler, 40void register_backlight_controller(struct backlight_controller *ctrler,
41 void *data, char *type) 41 void *data, char *type)
42{ 42{
43 struct device_node* bk_node; 43 struct device_node* bk_node;
@@ -99,7 +99,7 @@ void __pmac register_backlight_controller(struct backlight_controller *ctrler,
99} 99}
100EXPORT_SYMBOL(register_backlight_controller); 100EXPORT_SYMBOL(register_backlight_controller);
101 101
102void __pmac unregister_backlight_controller(struct backlight_controller 102void unregister_backlight_controller(struct backlight_controller
103 *ctrler, void *data) 103 *ctrler, void *data)
104{ 104{
105 /* We keep the current backlight level (for now) */ 105 /* We keep the current backlight level (for now) */
@@ -108,7 +108,7 @@ void __pmac unregister_backlight_controller(struct backlight_controller
108} 108}
109EXPORT_SYMBOL(unregister_backlight_controller); 109EXPORT_SYMBOL(unregister_backlight_controller);
110 110
111static int __pmac __set_backlight_enable(int enable) 111static int __set_backlight_enable(int enable)
112{ 112{
113 int rc; 113 int rc;
114 114
@@ -122,7 +122,7 @@ static int __pmac __set_backlight_enable(int enable)
122 release_console_sem(); 122 release_console_sem();
123 return rc; 123 return rc;
124} 124}
125int __pmac set_backlight_enable(int enable) 125int set_backlight_enable(int enable)
126{ 126{
127 if (!backlighter) 127 if (!backlighter)
128 return -ENODEV; 128 return -ENODEV;
@@ -133,7 +133,7 @@ int __pmac set_backlight_enable(int enable)
133 133
134EXPORT_SYMBOL(set_backlight_enable); 134EXPORT_SYMBOL(set_backlight_enable);
135 135
136int __pmac get_backlight_enable(void) 136int get_backlight_enable(void)
137{ 137{
138 if (!backlighter) 138 if (!backlighter)
139 return -ENODEV; 139 return -ENODEV;
@@ -141,7 +141,7 @@ int __pmac get_backlight_enable(void)
141} 141}
142EXPORT_SYMBOL(get_backlight_enable); 142EXPORT_SYMBOL(get_backlight_enable);
143 143
144static int __pmac __set_backlight_level(int level) 144static int __set_backlight_level(int level)
145{ 145{
146 int rc = 0; 146 int rc = 0;
147 147
@@ -165,7 +165,7 @@ static int __pmac __set_backlight_level(int level)
165 } 165 }
166 return rc; 166 return rc;
167} 167}
168int __pmac set_backlight_level(int level) 168int set_backlight_level(int level)
169{ 169{
170 if (!backlighter) 170 if (!backlighter)
171 return -ENODEV; 171 return -ENODEV;
@@ -176,7 +176,7 @@ int __pmac set_backlight_level(int level)
176 176
177EXPORT_SYMBOL(set_backlight_level); 177EXPORT_SYMBOL(set_backlight_level);
178 178
179int __pmac get_backlight_level(void) 179int get_backlight_level(void)
180{ 180{
181 if (!backlighter) 181 if (!backlighter)
182 return -ENODEV; 182 return -ENODEV;
diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c
index d4bc5f67ec53..fba7e4d7c0bf 100644
--- a/arch/ppc/platforms/pmac_cpufreq.c
+++ b/arch/ppc/platforms/pmac_cpufreq.c
@@ -136,7 +136,7 @@ static inline void debug_calc_bogomips(void)
136 136
137/* Switch CPU speed under 750FX CPU control 137/* Switch CPU speed under 750FX CPU control
138 */ 138 */
139static int __pmac cpu_750fx_cpu_speed(int low_speed) 139static int cpu_750fx_cpu_speed(int low_speed)
140{ 140{
141 u32 hid2; 141 u32 hid2;
142 142
@@ -172,7 +172,7 @@ static int __pmac cpu_750fx_cpu_speed(int low_speed)
172 return 0; 172 return 0;
173} 173}
174 174
175static unsigned int __pmac cpu_750fx_get_cpu_speed(void) 175static unsigned int cpu_750fx_get_cpu_speed(void)
176{ 176{
177 if (mfspr(SPRN_HID1) & HID1_PS) 177 if (mfspr(SPRN_HID1) & HID1_PS)
178 return low_freq; 178 return low_freq;
@@ -181,7 +181,7 @@ static unsigned int __pmac cpu_750fx_get_cpu_speed(void)
181} 181}
182 182
183/* Switch CPU speed using DFS */ 183/* Switch CPU speed using DFS */
184static int __pmac dfs_set_cpu_speed(int low_speed) 184static int dfs_set_cpu_speed(int low_speed)
185{ 185{
186 if (low_speed == 0) { 186 if (low_speed == 0) {
187 /* ramping up, set voltage first */ 187 /* ramping up, set voltage first */
@@ -205,7 +205,7 @@ static int __pmac dfs_set_cpu_speed(int low_speed)
205 return 0; 205 return 0;
206} 206}
207 207
208static unsigned int __pmac dfs_get_cpu_speed(void) 208static unsigned int dfs_get_cpu_speed(void)
209{ 209{
210 if (mfspr(SPRN_HID1) & HID1_DFS) 210 if (mfspr(SPRN_HID1) & HID1_DFS)
211 return low_freq; 211 return low_freq;
@@ -216,7 +216,7 @@ static unsigned int __pmac dfs_get_cpu_speed(void)
216 216
217/* Switch CPU speed using slewing GPIOs 217/* Switch CPU speed using slewing GPIOs
218 */ 218 */
219static int __pmac gpios_set_cpu_speed(int low_speed) 219static int gpios_set_cpu_speed(int low_speed)
220{ 220{
221 int gpio, timeout = 0; 221 int gpio, timeout = 0;
222 222
@@ -258,7 +258,7 @@ static int __pmac gpios_set_cpu_speed(int low_speed)
258 258
259/* Switch CPU speed under PMU control 259/* Switch CPU speed under PMU control
260 */ 260 */
261static int __pmac pmu_set_cpu_speed(int low_speed) 261static int pmu_set_cpu_speed(int low_speed)
262{ 262{
263 struct adb_request req; 263 struct adb_request req;
264 unsigned long save_l2cr; 264 unsigned long save_l2cr;
@@ -354,7 +354,7 @@ static int __pmac pmu_set_cpu_speed(int low_speed)
354 return 0; 354 return 0;
355} 355}
356 356
357static int __pmac do_set_cpu_speed(int speed_mode, int notify) 357static int do_set_cpu_speed(int speed_mode, int notify)
358{ 358{
359 struct cpufreq_freqs freqs; 359 struct cpufreq_freqs freqs;
360 unsigned long l3cr; 360 unsigned long l3cr;
@@ -391,17 +391,17 @@ static int __pmac do_set_cpu_speed(int speed_mode, int notify)
391 return 0; 391 return 0;
392} 392}
393 393
394static unsigned int __pmac pmac_cpufreq_get_speed(unsigned int cpu) 394static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
395{ 395{
396 return cur_freq; 396 return cur_freq;
397} 397}
398 398
399static int __pmac pmac_cpufreq_verify(struct cpufreq_policy *policy) 399static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
400{ 400{
401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs); 401 return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
402} 402}
403 403
404static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy, 404static int pmac_cpufreq_target( struct cpufreq_policy *policy,
405 unsigned int target_freq, 405 unsigned int target_freq,
406 unsigned int relation) 406 unsigned int relation)
407{ 407{
@@ -414,13 +414,13 @@ static int __pmac pmac_cpufreq_target( struct cpufreq_policy *policy,
414 return do_set_cpu_speed(newstate, 1); 414 return do_set_cpu_speed(newstate, 1);
415} 415}
416 416
417unsigned int __pmac pmac_get_one_cpufreq(int i) 417unsigned int pmac_get_one_cpufreq(int i)
418{ 418{
419 /* Supports only one CPU for now */ 419 /* Supports only one CPU for now */
420 return (i == 0) ? cur_freq : 0; 420 return (i == 0) ? cur_freq : 0;
421} 421}
422 422
423static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) 423static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
424{ 424{
425 if (policy->cpu != 0) 425 if (policy->cpu != 0)
426 return -ENODEV; 426 return -ENODEV;
@@ -433,7 +433,7 @@ static int __pmac pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs); 433 return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
434} 434}
435 435
436static u32 __pmac read_gpio(struct device_node *np) 436static u32 read_gpio(struct device_node *np)
437{ 437{
438 u32 *reg = (u32 *)get_property(np, "reg", NULL); 438 u32 *reg = (u32 *)get_property(np, "reg", NULL);
439 u32 offset; 439 u32 offset;
@@ -452,7 +452,7 @@ static u32 __pmac read_gpio(struct device_node *np)
452 return offset; 452 return offset;
453} 453}
454 454
455static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) 455static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
456{ 456{
457 /* Ok, this could be made a bit smarter, but let's be robust for now. We 457 /* Ok, this could be made a bit smarter, but let's be robust for now. We
458 * always force a speed change to high speed before sleep, to make sure 458 * always force a speed change to high speed before sleep, to make sure
@@ -468,7 +468,7 @@ static int __pmac pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message
468 return 0; 468 return 0;
469} 469}
470 470
471static int __pmac pmac_cpufreq_resume(struct cpufreq_policy *policy) 471static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
472{ 472{
473 /* If we resume, first check if we have a get() function */ 473 /* If we resume, first check if we have a get() function */
474 if (get_speed_proc) 474 if (get_speed_proc)
@@ -501,7 +501,7 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
501}; 501};
502 502
503 503
504static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) 504static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
505{ 505{
506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL, 506 struct device_node *volt_gpio_np = of_find_node_by_name(NULL,
507 "voltage-gpio"); 507 "voltage-gpio");
@@ -593,7 +593,7 @@ static int __pmac pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
593 return 0; 593 return 0;
594} 594}
595 595
596static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode) 596static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
597{ 597{
598 struct device_node *volt_gpio_np; 598 struct device_node *volt_gpio_np;
599 599
@@ -620,7 +620,7 @@ static int __pmac pmac_cpufreq_init_7447A(struct device_node *cpunode)
620 return 0; 620 return 0;
621} 621}
622 622
623static int __pmac pmac_cpufreq_init_750FX(struct device_node *cpunode) 623static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
624{ 624{
625 struct device_node *volt_gpio_np; 625 struct device_node *volt_gpio_np;
626 u32 pvr, *value; 626 u32 pvr, *value;
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index dd6d45ae0501..58884a63ebdb 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -63,7 +63,7 @@ extern struct device_node *k2_skiplist[2];
63 * We use a single global lock to protect accesses. Each driver has 63 * We use a single global lock to protect accesses. Each driver has
64 * to take care of its own locking 64 * to take care of its own locking
65 */ 65 */
66static DEFINE_SPINLOCK(feature_lock __pmacdata); 66static DEFINE_SPINLOCK(feature_lock);
67 67
68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); 68#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); 69#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
@@ -72,9 +72,9 @@ static DEFINE_SPINLOCK(feature_lock __pmacdata);
72/* 72/*
73 * Instance of some macio stuffs 73 * Instance of some macio stuffs
74 */ 74 */
75struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata; 75struct macio_chip macio_chips[MAX_MACIO_CHIPS];
76 76
77struct macio_chip* __pmac macio_find(struct device_node* child, int type) 77struct macio_chip* macio_find(struct device_node* child, int type)
78{ 78{
79 while(child) { 79 while(child) {
80 int i; 80 int i;
@@ -89,7 +89,7 @@ struct macio_chip* __pmac macio_find(struct device_node* child, int type)
89} 89}
90EXPORT_SYMBOL_GPL(macio_find); 90EXPORT_SYMBOL_GPL(macio_find);
91 91
92static const char* macio_names[] __pmacdata = 92static const char* macio_names[] =
93{ 93{
94 "Unknown", 94 "Unknown",
95 "Grand Central", 95 "Grand Central",
@@ -116,10 +116,10 @@ static const char* macio_names[] __pmacdata =
116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v))) 116#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v))) 117#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
118 118
119static struct device_node* uninorth_node __pmacdata; 119static struct device_node* uninorth_node;
120static u32 __iomem * uninorth_base __pmacdata; 120static u32 __iomem * uninorth_base;
121static u32 uninorth_rev __pmacdata; 121static u32 uninorth_rev;
122static int uninorth_u3 __pmacdata; 122static int uninorth_u3;
123static void __iomem *u3_ht; 123static void __iomem *u3_ht;
124 124
125/* 125/*
@@ -142,13 +142,13 @@ struct pmac_mb_def
142 struct feature_table_entry* features; 142 struct feature_table_entry* features;
143 unsigned long board_flags; 143 unsigned long board_flags;
144}; 144};
145static struct pmac_mb_def pmac_mb __pmacdata; 145static struct pmac_mb_def pmac_mb;
146 146
147/* 147/*
148 * Here are the chip specific feature functions 148 * Here are the chip specific feature functions
149 */ 149 */
150 150
151static inline int __pmac 151static inline int
152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value) 152simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int value)
153{ 153{
154 struct macio_chip* macio; 154 struct macio_chip* macio;
@@ -170,7 +170,7 @@ simple_feature_tweak(struct device_node* node, int type, int reg, u32 mask, int
170 170
171#ifndef CONFIG_POWER4 171#ifndef CONFIG_POWER4
172 172
173static long __pmac 173static long
174ohare_htw_scc_enable(struct device_node* node, long param, long value) 174ohare_htw_scc_enable(struct device_node* node, long param, long value)
175{ 175{
176 struct macio_chip* macio; 176 struct macio_chip* macio;
@@ -263,21 +263,21 @@ ohare_htw_scc_enable(struct device_node* node, long param, long value)
263 return 0; 263 return 0;
264} 264}
265 265
266static long __pmac 266static long
267ohare_floppy_enable(struct device_node* node, long param, long value) 267ohare_floppy_enable(struct device_node* node, long param, long value)
268{ 268{
269 return simple_feature_tweak(node, macio_ohare, 269 return simple_feature_tweak(node, macio_ohare,
270 OHARE_FCR, OH_FLOPPY_ENABLE, value); 270 OHARE_FCR, OH_FLOPPY_ENABLE, value);
271} 271}
272 272
273static long __pmac 273static long
274ohare_mesh_enable(struct device_node* node, long param, long value) 274ohare_mesh_enable(struct device_node* node, long param, long value)
275{ 275{
276 return simple_feature_tweak(node, macio_ohare, 276 return simple_feature_tweak(node, macio_ohare,
277 OHARE_FCR, OH_MESH_ENABLE, value); 277 OHARE_FCR, OH_MESH_ENABLE, value);
278} 278}
279 279
280static long __pmac 280static long
281ohare_ide_enable(struct device_node* node, long param, long value) 281ohare_ide_enable(struct device_node* node, long param, long value)
282{ 282{
283 switch(param) { 283 switch(param) {
@@ -298,7 +298,7 @@ ohare_ide_enable(struct device_node* node, long param, long value)
298 } 298 }
299} 299}
300 300
301static long __pmac 301static long
302ohare_ide_reset(struct device_node* node, long param, long value) 302ohare_ide_reset(struct device_node* node, long param, long value)
303{ 303{
304 switch(param) { 304 switch(param) {
@@ -313,7 +313,7 @@ ohare_ide_reset(struct device_node* node, long param, long value)
313 } 313 }
314} 314}
315 315
316static long __pmac 316static long
317ohare_sleep_state(struct device_node* node, long param, long value) 317ohare_sleep_state(struct device_node* node, long param, long value)
318{ 318{
319 struct macio_chip* macio = &macio_chips[0]; 319 struct macio_chip* macio = &macio_chips[0];
@@ -329,7 +329,7 @@ ohare_sleep_state(struct device_node* node, long param, long value)
329 return 0; 329 return 0;
330} 330}
331 331
332static long __pmac 332static long
333heathrow_modem_enable(struct device_node* node, long param, long value) 333heathrow_modem_enable(struct device_node* node, long param, long value)
334{ 334{
335 struct macio_chip* macio; 335 struct macio_chip* macio;
@@ -373,7 +373,7 @@ heathrow_modem_enable(struct device_node* node, long param, long value)
373 return 0; 373 return 0;
374} 374}
375 375
376static long __pmac 376static long
377heathrow_floppy_enable(struct device_node* node, long param, long value) 377heathrow_floppy_enable(struct device_node* node, long param, long value)
378{ 378{
379 return simple_feature_tweak(node, macio_unknown, 379 return simple_feature_tweak(node, macio_unknown,
@@ -382,7 +382,7 @@ heathrow_floppy_enable(struct device_node* node, long param, long value)
382 value); 382 value);
383} 383}
384 384
385static long __pmac 385static long
386heathrow_mesh_enable(struct device_node* node, long param, long value) 386heathrow_mesh_enable(struct device_node* node, long param, long value)
387{ 387{
388 struct macio_chip* macio; 388 struct macio_chip* macio;
@@ -411,7 +411,7 @@ heathrow_mesh_enable(struct device_node* node, long param, long value)
411 return 0; 411 return 0;
412} 412}
413 413
414static long __pmac 414static long
415heathrow_ide_enable(struct device_node* node, long param, long value) 415heathrow_ide_enable(struct device_node* node, long param, long value)
416{ 416{
417 switch(param) { 417 switch(param) {
@@ -426,7 +426,7 @@ heathrow_ide_enable(struct device_node* node, long param, long value)
426 } 426 }
427} 427}
428 428
429static long __pmac 429static long
430heathrow_ide_reset(struct device_node* node, long param, long value) 430heathrow_ide_reset(struct device_node* node, long param, long value)
431{ 431{
432 switch(param) { 432 switch(param) {
@@ -441,7 +441,7 @@ heathrow_ide_reset(struct device_node* node, long param, long value)
441 } 441 }
442} 442}
443 443
444static long __pmac 444static long
445heathrow_bmac_enable(struct device_node* node, long param, long value) 445heathrow_bmac_enable(struct device_node* node, long param, long value)
446{ 446{
447 struct macio_chip* macio; 447 struct macio_chip* macio;
@@ -470,7 +470,7 @@ heathrow_bmac_enable(struct device_node* node, long param, long value)
470 return 0; 470 return 0;
471} 471}
472 472
473static long __pmac 473static long
474heathrow_sound_enable(struct device_node* node, long param, long value) 474heathrow_sound_enable(struct device_node* node, long param, long value)
475{ 475{
476 struct macio_chip* macio; 476 struct macio_chip* macio;
@@ -501,16 +501,16 @@ heathrow_sound_enable(struct device_node* node, long param, long value)
501 return 0; 501 return 0;
502} 502}
503 503
504static u32 save_fcr[6] __pmacdata; 504static u32 save_fcr[6];
505static u32 save_mbcr __pmacdata; 505static u32 save_mbcr;
506static u32 save_gpio_levels[2] __pmacdata; 506static u32 save_gpio_levels[2];
507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT] __pmacdata; 507static u8 save_gpio_extint[KEYLARGO_GPIO_EXTINT_CNT];
508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT] __pmacdata; 508static u8 save_gpio_normal[KEYLARGO_GPIO_CNT];
509static u32 save_unin_clock_ctl __pmacdata; 509static u32 save_unin_clock_ctl;
510static struct dbdma_regs save_dbdma[13] __pmacdata; 510static struct dbdma_regs save_dbdma[13];
511static struct dbdma_regs save_alt_dbdma[13] __pmacdata; 511static struct dbdma_regs save_alt_dbdma[13];
512 512
513static void __pmac 513static void
514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save) 514dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
515{ 515{
516 int i; 516 int i;
@@ -527,7 +527,7 @@ dbdma_save(struct macio_chip* macio, struct dbdma_regs* save)
527 } 527 }
528} 528}
529 529
530static void __pmac 530static void
531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save) 531dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
532{ 532{
533 int i; 533 int i;
@@ -547,7 +547,7 @@ dbdma_restore(struct macio_chip* macio, struct dbdma_regs* save)
547 } 547 }
548} 548}
549 549
550static void __pmac 550static void
551heathrow_sleep(struct macio_chip* macio, int secondary) 551heathrow_sleep(struct macio_chip* macio, int secondary)
552{ 552{
553 if (secondary) { 553 if (secondary) {
@@ -580,7 +580,7 @@ heathrow_sleep(struct macio_chip* macio, int secondary)
580 (void)MACIO_IN32(HEATHROW_FCR); 580 (void)MACIO_IN32(HEATHROW_FCR);
581} 581}
582 582
583static void __pmac 583static void
584heathrow_wakeup(struct macio_chip* macio, int secondary) 584heathrow_wakeup(struct macio_chip* macio, int secondary)
585{ 585{
586 if (secondary) { 586 if (secondary) {
@@ -605,7 +605,7 @@ heathrow_wakeup(struct macio_chip* macio, int secondary)
605 } 605 }
606} 606}
607 607
608static long __pmac 608static long
609heathrow_sleep_state(struct device_node* node, long param, long value) 609heathrow_sleep_state(struct device_node* node, long param, long value)
610{ 610{
611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) 611 if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0)
@@ -622,7 +622,7 @@ heathrow_sleep_state(struct device_node* node, long param, long value)
622 return 0; 622 return 0;
623} 623}
624 624
625static long __pmac 625static long
626core99_scc_enable(struct device_node* node, long param, long value) 626core99_scc_enable(struct device_node* node, long param, long value)
627{ 627{
628 struct macio_chip* macio; 628 struct macio_chip* macio;
@@ -723,7 +723,7 @@ core99_scc_enable(struct device_node* node, long param, long value)
723 return 0; 723 return 0;
724} 724}
725 725
726static long __pmac 726static long
727core99_modem_enable(struct device_node* node, long param, long value) 727core99_modem_enable(struct device_node* node, long param, long value)
728{ 728{
729 struct macio_chip* macio; 729 struct macio_chip* macio;
@@ -775,7 +775,7 @@ core99_modem_enable(struct device_node* node, long param, long value)
775 return 0; 775 return 0;
776} 776}
777 777
778static long __pmac 778static long
779pangea_modem_enable(struct device_node* node, long param, long value) 779pangea_modem_enable(struct device_node* node, long param, long value)
780{ 780{
781 struct macio_chip* macio; 781 struct macio_chip* macio;
@@ -830,7 +830,7 @@ pangea_modem_enable(struct device_node* node, long param, long value)
830 return 0; 830 return 0;
831} 831}
832 832
833static long __pmac 833static long
834core99_ata100_enable(struct device_node* node, long value) 834core99_ata100_enable(struct device_node* node, long value)
835{ 835{
836 unsigned long flags; 836 unsigned long flags;
@@ -860,7 +860,7 @@ core99_ata100_enable(struct device_node* node, long value)
860 return 0; 860 return 0;
861} 861}
862 862
863static long __pmac 863static long
864core99_ide_enable(struct device_node* node, long param, long value) 864core99_ide_enable(struct device_node* node, long param, long value)
865{ 865{
866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2 866 /* Bus ID 0 to 2 are KeyLargo based IDE, busID 3 is U2
@@ -883,7 +883,7 @@ core99_ide_enable(struct device_node* node, long param, long value)
883 } 883 }
884} 884}
885 885
886static long __pmac 886static long
887core99_ide_reset(struct device_node* node, long param, long value) 887core99_ide_reset(struct device_node* node, long param, long value)
888{ 888{
889 switch(param) { 889 switch(param) {
@@ -901,7 +901,7 @@ core99_ide_reset(struct device_node* node, long param, long value)
901 } 901 }
902} 902}
903 903
904static long __pmac 904static long
905core99_gmac_enable(struct device_node* node, long param, long value) 905core99_gmac_enable(struct device_node* node, long param, long value)
906{ 906{
907 unsigned long flags; 907 unsigned long flags;
@@ -918,7 +918,7 @@ core99_gmac_enable(struct device_node* node, long param, long value)
918 return 0; 918 return 0;
919} 919}
920 920
921static long __pmac 921static long
922core99_gmac_phy_reset(struct device_node* node, long param, long value) 922core99_gmac_phy_reset(struct device_node* node, long param, long value)
923{ 923{
924 unsigned long flags; 924 unsigned long flags;
@@ -943,7 +943,7 @@ core99_gmac_phy_reset(struct device_node* node, long param, long value)
943 return 0; 943 return 0;
944} 944}
945 945
946static long __pmac 946static long
947core99_sound_chip_enable(struct device_node* node, long param, long value) 947core99_sound_chip_enable(struct device_node* node, long param, long value)
948{ 948{
949 struct macio_chip* macio; 949 struct macio_chip* macio;
@@ -973,7 +973,7 @@ core99_sound_chip_enable(struct device_node* node, long param, long value)
973 return 0; 973 return 0;
974} 974}
975 975
976static long __pmac 976static long
977core99_airport_enable(struct device_node* node, long param, long value) 977core99_airport_enable(struct device_node* node, long param, long value)
978{ 978{
979 struct macio_chip* macio; 979 struct macio_chip* macio;
@@ -1060,7 +1060,7 @@ core99_airport_enable(struct device_node* node, long param, long value)
1060} 1060}
1061 1061
1062#ifdef CONFIG_SMP 1062#ifdef CONFIG_SMP
1063static long __pmac 1063static long
1064core99_reset_cpu(struct device_node* node, long param, long value) 1064core99_reset_cpu(struct device_node* node, long param, long value)
1065{ 1065{
1066 unsigned int reset_io = 0; 1066 unsigned int reset_io = 0;
@@ -1104,7 +1104,7 @@ core99_reset_cpu(struct device_node* node, long param, long value)
1104} 1104}
1105#endif /* CONFIG_SMP */ 1105#endif /* CONFIG_SMP */
1106 1106
1107static long __pmac 1107static long
1108core99_usb_enable(struct device_node* node, long param, long value) 1108core99_usb_enable(struct device_node* node, long param, long value)
1109{ 1109{
1110 struct macio_chip* macio; 1110 struct macio_chip* macio;
@@ -1257,7 +1257,7 @@ core99_usb_enable(struct device_node* node, long param, long value)
1257 return 0; 1257 return 0;
1258} 1258}
1259 1259
1260static long __pmac 1260static long
1261core99_firewire_enable(struct device_node* node, long param, long value) 1261core99_firewire_enable(struct device_node* node, long param, long value)
1262{ 1262{
1263 unsigned long flags; 1263 unsigned long flags;
@@ -1284,7 +1284,7 @@ core99_firewire_enable(struct device_node* node, long param, long value)
1284 return 0; 1284 return 0;
1285} 1285}
1286 1286
1287static long __pmac 1287static long
1288core99_firewire_cable_power(struct device_node* node, long param, long value) 1288core99_firewire_cable_power(struct device_node* node, long param, long value)
1289{ 1289{
1290 unsigned long flags; 1290 unsigned long flags;
@@ -1315,7 +1315,7 @@ core99_firewire_cable_power(struct device_node* node, long param, long value)
1315 return 0; 1315 return 0;
1316} 1316}
1317 1317
1318static long __pmac 1318static long
1319intrepid_aack_delay_enable(struct device_node* node, long param, long value) 1319intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1320{ 1320{
1321 unsigned long flags; 1321 unsigned long flags;
@@ -1336,7 +1336,7 @@ intrepid_aack_delay_enable(struct device_node* node, long param, long value)
1336 1336
1337#endif /* CONFIG_POWER4 */ 1337#endif /* CONFIG_POWER4 */
1338 1338
1339static long __pmac 1339static long
1340core99_read_gpio(struct device_node* node, long param, long value) 1340core99_read_gpio(struct device_node* node, long param, long value)
1341{ 1341{
1342 struct macio_chip* macio = &macio_chips[0]; 1342 struct macio_chip* macio = &macio_chips[0];
@@ -1345,7 +1345,7 @@ core99_read_gpio(struct device_node* node, long param, long value)
1345} 1345}
1346 1346
1347 1347
1348static long __pmac 1348static long
1349core99_write_gpio(struct device_node* node, long param, long value) 1349core99_write_gpio(struct device_node* node, long param, long value)
1350{ 1350{
1351 struct macio_chip* macio = &macio_chips[0]; 1351 struct macio_chip* macio = &macio_chips[0];
@@ -1356,7 +1356,7 @@ core99_write_gpio(struct device_node* node, long param, long value)
1356 1356
1357#ifdef CONFIG_POWER4 1357#ifdef CONFIG_POWER4
1358 1358
1359static long __pmac 1359static long
1360g5_gmac_enable(struct device_node* node, long param, long value) 1360g5_gmac_enable(struct device_node* node, long param, long value)
1361{ 1361{
1362 struct macio_chip* macio = &macio_chips[0]; 1362 struct macio_chip* macio = &macio_chips[0];
@@ -1380,7 +1380,7 @@ g5_gmac_enable(struct device_node* node, long param, long value)
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static long __pmac 1383static long
1384g5_fw_enable(struct device_node* node, long param, long value) 1384g5_fw_enable(struct device_node* node, long param, long value)
1385{ 1385{
1386 struct macio_chip* macio = &macio_chips[0]; 1386 struct macio_chip* macio = &macio_chips[0];
@@ -1403,7 +1403,7 @@ g5_fw_enable(struct device_node* node, long param, long value)
1403 return 0; 1403 return 0;
1404} 1404}
1405 1405
1406static long __pmac 1406static long
1407g5_mpic_enable(struct device_node* node, long param, long value) 1407g5_mpic_enable(struct device_node* node, long param, long value)
1408{ 1408{
1409 unsigned long flags; 1409 unsigned long flags;
@@ -1419,7 +1419,7 @@ g5_mpic_enable(struct device_node* node, long param, long value)
1419} 1419}
1420 1420
1421#ifdef CONFIG_SMP 1421#ifdef CONFIG_SMP
1422static long __pmac 1422static long
1423g5_reset_cpu(struct device_node* node, long param, long value) 1423g5_reset_cpu(struct device_node* node, long param, long value)
1424{ 1424{
1425 unsigned int reset_io = 0; 1425 unsigned int reset_io = 0;
@@ -1465,7 +1465,7 @@ g5_reset_cpu(struct device_node* node, long param, long value)
1465 * This takes the second CPU off the bus on dual CPU machines 1465 * This takes the second CPU off the bus on dual CPU machines
1466 * running UP 1466 * running UP
1467 */ 1467 */
1468void __pmac g5_phy_disable_cpu1(void) 1468void g5_phy_disable_cpu1(void)
1469{ 1469{
1470 UN_OUT(U3_API_PHY_CONFIG_1, 0); 1470 UN_OUT(U3_API_PHY_CONFIG_1, 0);
1471} 1471}
@@ -1474,7 +1474,7 @@ void __pmac g5_phy_disable_cpu1(void)
1474 1474
1475#ifndef CONFIG_POWER4 1475#ifndef CONFIG_POWER4
1476 1476
1477static void __pmac 1477static void
1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode) 1478keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1479{ 1479{
1480 u32 temp; 1480 u32 temp;
@@ -1528,7 +1528,7 @@ keylargo_shutdown(struct macio_chip* macio, int sleep_mode)
1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1528 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1529} 1529}
1530 1530
1531static void __pmac 1531static void
1532pangea_shutdown(struct macio_chip* macio, int sleep_mode) 1532pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1533{ 1533{
1534 u32 temp; 1534 u32 temp;
@@ -1562,7 +1562,7 @@ pangea_shutdown(struct macio_chip* macio, int sleep_mode)
1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); 1562 (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1);
1563} 1563}
1564 1564
1565static void __pmac 1565static void
1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode) 1566intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1567{ 1567{
1568 u32 temp; 1568 u32 temp;
@@ -1591,7 +1591,7 @@ intrepid_shutdown(struct macio_chip* macio, int sleep_mode)
1591} 1591}
1592 1592
1593 1593
1594void __pmac pmac_tweak_clock_spreading(int enable) 1594void pmac_tweak_clock_spreading(int enable)
1595{ 1595{
1596 struct macio_chip* macio = &macio_chips[0]; 1596 struct macio_chip* macio = &macio_chips[0];
1597 1597
@@ -1698,7 +1698,7 @@ void __pmac pmac_tweak_clock_spreading(int enable)
1698} 1698}
1699 1699
1700 1700
1701static int __pmac 1701static int
1702core99_sleep(void) 1702core99_sleep(void)
1703{ 1703{
1704 struct macio_chip* macio; 1704 struct macio_chip* macio;
@@ -1791,7 +1791,7 @@ core99_sleep(void)
1791 return 0; 1791 return 0;
1792} 1792}
1793 1793
1794static int __pmac 1794static int
1795core99_wake_up(void) 1795core99_wake_up(void)
1796{ 1796{
1797 struct macio_chip* macio; 1797 struct macio_chip* macio;
@@ -1854,7 +1854,7 @@ core99_wake_up(void)
1854 return 0; 1854 return 0;
1855} 1855}
1856 1856
1857static long __pmac 1857static long
1858core99_sleep_state(struct device_node* node, long param, long value) 1858core99_sleep_state(struct device_node* node, long param, long value)
1859{ 1859{
1860 /* Param == 1 means to enter the "fake sleep" mode that is 1860 /* Param == 1 means to enter the "fake sleep" mode that is
@@ -1884,7 +1884,7 @@ core99_sleep_state(struct device_node* node, long param, long value)
1884 1884
1885#endif /* CONFIG_POWER4 */ 1885#endif /* CONFIG_POWER4 */
1886 1886
1887static long __pmac 1887static long
1888generic_dev_can_wake(struct device_node* node, long param, long value) 1888generic_dev_can_wake(struct device_node* node, long param, long value)
1889{ 1889{
1890 /* Todo: eventually check we are really dealing with on-board 1890 /* Todo: eventually check we are really dealing with on-board
@@ -1896,7 +1896,7 @@ generic_dev_can_wake(struct device_node* node, long param, long value)
1896 return 0; 1896 return 0;
1897} 1897}
1898 1898
1899static long __pmac 1899static long
1900generic_get_mb_info(struct device_node* node, long param, long value) 1900generic_get_mb_info(struct device_node* node, long param, long value)
1901{ 1901{
1902 switch(param) { 1902 switch(param) {
@@ -1919,7 +1919,7 @@ generic_get_mb_info(struct device_node* node, long param, long value)
1919 1919
1920/* Used on any machine 1920/* Used on any machine
1921 */ 1921 */
1922static struct feature_table_entry any_features[] __pmacdata = { 1922static struct feature_table_entry any_features[] = {
1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info }, 1923 { PMAC_FTR_GET_MB_INFO, generic_get_mb_info },
1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake }, 1924 { PMAC_FTR_DEVICE_CAN_WAKE, generic_dev_can_wake },
1925 { 0, NULL } 1925 { 0, NULL }
@@ -1931,7 +1931,7 @@ static struct feature_table_entry any_features[] __pmacdata = {
1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem 1931 * 2400,3400 and 3500 series powerbooks. Some older desktops seem
1932 * to have issues with turning on/off those asic cells 1932 * to have issues with turning on/off those asic cells
1933 */ 1933 */
1934static struct feature_table_entry ohare_features[] __pmacdata = { 1934static struct feature_table_entry ohare_features[] = {
1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1935 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable }, 1936 { PMAC_FTR_SWIM3_ENABLE, ohare_floppy_enable },
1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable }, 1937 { PMAC_FTR_MESH_ENABLE, ohare_mesh_enable },
@@ -1945,7 +1945,7 @@ static struct feature_table_entry ohare_features[] __pmacdata = {
1945 * Separated as some features couldn't be properly tested 1945 * Separated as some features couldn't be properly tested
1946 * and the serial port control bits appear to confuse it. 1946 * and the serial port control bits appear to confuse it.
1947 */ 1947 */
1948static struct feature_table_entry heathrow_desktop_features[] __pmacdata = { 1948static struct feature_table_entry heathrow_desktop_features[] = {
1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1949 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable }, 1950 { PMAC_FTR_MESH_ENABLE, heathrow_mesh_enable },
1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable }, 1951 { PMAC_FTR_IDE_ENABLE, heathrow_ide_enable },
@@ -1957,7 +1957,7 @@ static struct feature_table_entry heathrow_desktop_features[] __pmacdata = {
1957/* Heathrow based laptop, that is the Wallstreet and mainstreet 1957/* Heathrow based laptop, that is the Wallstreet and mainstreet
1958 * powerbooks. 1958 * powerbooks.
1959 */ 1959 */
1960static struct feature_table_entry heathrow_laptop_features[] __pmacdata = { 1960static struct feature_table_entry heathrow_laptop_features[] = {
1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1961 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1962 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1963 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1973,7 +1973,7 @@ static struct feature_table_entry heathrow_laptop_features[] __pmacdata = {
1973/* Paddington based machines 1973/* Paddington based machines
1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4. 1974 * The lombard (101) powerbook, first iMac models, B&W G3 and Yikes G4.
1975 */ 1975 */
1976static struct feature_table_entry paddington_features[] __pmacdata = { 1976static struct feature_table_entry paddington_features[] = {
1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable }, 1977 { PMAC_FTR_SCC_ENABLE, ohare_htw_scc_enable },
1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable }, 1978 { PMAC_FTR_MODEM_ENABLE, heathrow_modem_enable },
1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable }, 1979 { PMAC_FTR_SWIM3_ENABLE, heathrow_floppy_enable },
@@ -1991,7 +1991,7 @@ static struct feature_table_entry paddington_features[] __pmacdata = {
1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo 1991 * chipset. The pangea chipset is the "combo" UniNorth/KeyLargo
1992 * used on iBook2 & iMac "flow power". 1992 * used on iBook2 & iMac "flow power".
1993 */ 1993 */
1994static struct feature_table_entry core99_features[] __pmacdata = { 1994static struct feature_table_entry core99_features[] = {
1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 1995 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable }, 1996 { PMAC_FTR_MODEM_ENABLE, core99_modem_enable },
1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 1997 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2014,7 +2014,7 @@ static struct feature_table_entry core99_features[] __pmacdata = {
2014 2014
2015/* RackMac 2015/* RackMac
2016 */ 2016 */
2017static struct feature_table_entry rackmac_features[] __pmacdata = { 2017static struct feature_table_entry rackmac_features[] = {
2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2018 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2019 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
2020 { PMAC_FTR_IDE_RESET, core99_ide_reset }, 2020 { PMAC_FTR_IDE_RESET, core99_ide_reset },
@@ -2034,7 +2034,7 @@ static struct feature_table_entry rackmac_features[] __pmacdata = {
2034 2034
2035/* Pangea features 2035/* Pangea features
2036 */ 2036 */
2037static struct feature_table_entry pangea_features[] __pmacdata = { 2037static struct feature_table_entry pangea_features[] = {
2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2038 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2039 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2040 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2054,7 +2054,7 @@ static struct feature_table_entry pangea_features[] __pmacdata = {
2054 2054
2055/* Intrepid features 2055/* Intrepid features
2056 */ 2056 */
2057static struct feature_table_entry intrepid_features[] __pmacdata = { 2057static struct feature_table_entry intrepid_features[] = {
2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable }, 2058 { PMAC_FTR_SCC_ENABLE, core99_scc_enable },
2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable }, 2059 { PMAC_FTR_MODEM_ENABLE, pangea_modem_enable },
2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable }, 2060 { PMAC_FTR_IDE_ENABLE, core99_ide_enable },
@@ -2077,7 +2077,7 @@ static struct feature_table_entry intrepid_features[] __pmacdata = {
2077 2077
2078/* G5 features 2078/* G5 features
2079 */ 2079 */
2080static struct feature_table_entry g5_features[] __pmacdata = { 2080static struct feature_table_entry g5_features[] = {
2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable }, 2081 { PMAC_FTR_GMAC_ENABLE, g5_gmac_enable },
2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable }, 2082 { PMAC_FTR_1394_ENABLE, g5_fw_enable },
2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable }, 2083 { PMAC_FTR_ENABLE_MPIC, g5_mpic_enable },
@@ -2091,7 +2091,7 @@ static struct feature_table_entry g5_features[] __pmacdata = {
2091 2091
2092#endif /* CONFIG_POWER4 */ 2092#endif /* CONFIG_POWER4 */
2093 2093
2094static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { 2094static struct pmac_mb_def pmac_mb_defs[] = {
2095#ifndef CONFIG_POWER4 2095#ifndef CONFIG_POWER4
2096 /* 2096 /*
2097 * Desktops 2097 * Desktops
@@ -2356,7 +2356,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
2356/* 2356/*
2357 * The toplevel feature_call callback 2357 * The toplevel feature_call callback
2358 */ 2358 */
2359long __pmac 2359long
2360pmac_do_feature_call(unsigned int selector, ...) 2360pmac_do_feature_call(unsigned int selector, ...)
2361{ 2361{
2362 struct device_node* node; 2362 struct device_node* node;
@@ -2939,8 +2939,8 @@ void __init pmac_check_ht_link(void)
2939 * Early video resume hook 2939 * Early video resume hook
2940 */ 2940 */
2941 2941
2942static void (*pmac_early_vresume_proc)(void *data) __pmacdata; 2942static void (*pmac_early_vresume_proc)(void *data);
2943static void *pmac_early_vresume_data __pmacdata; 2943static void *pmac_early_vresume_data;
2944 2944
2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data) 2945void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2946{ 2946{
@@ -2953,7 +2953,7 @@ void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
2953} 2953}
2954EXPORT_SYMBOL(pmac_set_early_video_resume); 2954EXPORT_SYMBOL(pmac_set_early_video_resume);
2955 2955
2956void __pmac pmac_call_early_video_resume(void) 2956void pmac_call_early_video_resume(void)
2957{ 2957{
2958 if (pmac_early_vresume_proc) 2958 if (pmac_early_vresume_proc)
2959 pmac_early_vresume_proc(pmac_early_vresume_data); 2959 pmac_early_vresume_proc(pmac_early_vresume_data);
@@ -2963,11 +2963,11 @@ void __pmac pmac_call_early_video_resume(void)
2963 * AGP related suspend/resume code 2963 * AGP related suspend/resume code
2964 */ 2964 */
2965 2965
2966static struct pci_dev *pmac_agp_bridge __pmacdata; 2966static struct pci_dev *pmac_agp_bridge;
2967static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata; 2967static int (*pmac_agp_suspend)(struct pci_dev *bridge);
2968static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata; 2968static int (*pmac_agp_resume)(struct pci_dev *bridge);
2969 2969
2970void __pmac pmac_register_agp_pm(struct pci_dev *bridge, 2970void pmac_register_agp_pm(struct pci_dev *bridge,
2971 int (*suspend)(struct pci_dev *bridge), 2971 int (*suspend)(struct pci_dev *bridge),
2972 int (*resume)(struct pci_dev *bridge)) 2972 int (*resume)(struct pci_dev *bridge))
2973{ 2973{
@@ -2984,7 +2984,7 @@ void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
2984} 2984}
2985EXPORT_SYMBOL(pmac_register_agp_pm); 2985EXPORT_SYMBOL(pmac_register_agp_pm);
2986 2986
2987void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev) 2987void pmac_suspend_agp_for_card(struct pci_dev *dev)
2988{ 2988{
2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL) 2989 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
2990 return; 2990 return;
@@ -2994,7 +2994,7 @@ void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
2994} 2994}
2995EXPORT_SYMBOL(pmac_suspend_agp_for_card); 2995EXPORT_SYMBOL(pmac_suspend_agp_for_card);
2996 2996
2997void __pmac pmac_resume_agp_for_card(struct pci_dev *dev) 2997void pmac_resume_agp_for_card(struct pci_dev *dev)
2998{ 2998{
2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL) 2999 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
3000 return; 3000 return;
diff --git a/arch/ppc/platforms/pmac_nvram.c b/arch/ppc/platforms/pmac_nvram.c
index c9de64205996..8c9b008c7226 100644
--- a/arch/ppc/platforms/pmac_nvram.c
+++ b/arch/ppc/platforms/pmac_nvram.c
@@ -88,17 +88,17 @@ extern int system_running;
88static int (*core99_write_bank)(int bank, u8* datas); 88static int (*core99_write_bank)(int bank, u8* datas);
89static int (*core99_erase_bank)(int bank); 89static int (*core99_erase_bank)(int bank);
90 90
91static char *nvram_image __pmacdata; 91static char *nvram_image;
92 92
93 93
94static unsigned char __pmac core99_nvram_read_byte(int addr) 94static unsigned char core99_nvram_read_byte(int addr)
95{ 95{
96 if (nvram_image == NULL) 96 if (nvram_image == NULL)
97 return 0xff; 97 return 0xff;
98 return nvram_image[addr]; 98 return nvram_image[addr];
99} 99}
100 100
101static void __pmac core99_nvram_write_byte(int addr, unsigned char val) 101static void core99_nvram_write_byte(int addr, unsigned char val)
102{ 102{
103 if (nvram_image == NULL) 103 if (nvram_image == NULL)
104 return; 104 return;
@@ -106,18 +106,18 @@ static void __pmac core99_nvram_write_byte(int addr, unsigned char val)
106} 106}
107 107
108 108
109static unsigned char __openfirmware direct_nvram_read_byte(int addr) 109static unsigned char direct_nvram_read_byte(int addr)
110{ 110{
111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); 111 return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
112} 112}
113 113
114static void __openfirmware direct_nvram_write_byte(int addr, unsigned char val) 114static void direct_nvram_write_byte(int addr, unsigned char val)
115{ 115{
116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val); 116 out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
117} 117}
118 118
119 119
120static unsigned char __pmac indirect_nvram_read_byte(int addr) 120static unsigned char indirect_nvram_read_byte(int addr)
121{ 121{
122 unsigned char val; 122 unsigned char val;
123 unsigned long flags; 123 unsigned long flags;
@@ -130,7 +130,7 @@ static unsigned char __pmac indirect_nvram_read_byte(int addr)
130 return val; 130 return val;
131} 131}
132 132
133static void __pmac indirect_nvram_write_byte(int addr, unsigned char val) 133static void indirect_nvram_write_byte(int addr, unsigned char val)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 136
@@ -143,13 +143,13 @@ static void __pmac indirect_nvram_write_byte(int addr, unsigned char val)
143 143
144#ifdef CONFIG_ADB_PMU 144#ifdef CONFIG_ADB_PMU
145 145
146static void __pmac pmu_nvram_complete(struct adb_request *req) 146static void pmu_nvram_complete(struct adb_request *req)
147{ 147{
148 if (req->arg) 148 if (req->arg)
149 complete((struct completion *)req->arg); 149 complete((struct completion *)req->arg);
150} 150}
151 151
152static unsigned char __pmac pmu_nvram_read_byte(int addr) 152static unsigned char pmu_nvram_read_byte(int addr)
153{ 153{
154 struct adb_request req; 154 struct adb_request req;
155 DECLARE_COMPLETION(req_complete); 155 DECLARE_COMPLETION(req_complete);
@@ -165,7 +165,7 @@ static unsigned char __pmac pmu_nvram_read_byte(int addr)
165 return req.reply[0]; 165 return req.reply[0];
166} 166}
167 167
168static void __pmac pmu_nvram_write_byte(int addr, unsigned char val) 168static void pmu_nvram_write_byte(int addr, unsigned char val)
169{ 169{
170 struct adb_request req; 170 struct adb_request req;
171 DECLARE_COMPLETION(req_complete); 171 DECLARE_COMPLETION(req_complete);
@@ -183,7 +183,7 @@ static void __pmac pmu_nvram_write_byte(int addr, unsigned char val)
183#endif /* CONFIG_ADB_PMU */ 183#endif /* CONFIG_ADB_PMU */
184 184
185 185
186static u8 __pmac chrp_checksum(struct chrp_header* hdr) 186static u8 chrp_checksum(struct chrp_header* hdr)
187{ 187{
188 u8 *ptr; 188 u8 *ptr;
189 u16 sum = hdr->signature; 189 u16 sum = hdr->signature;
@@ -194,7 +194,7 @@ static u8 __pmac chrp_checksum(struct chrp_header* hdr)
194 return sum; 194 return sum;
195} 195}
196 196
197static u32 __pmac core99_calc_adler(u8 *buffer) 197static u32 core99_calc_adler(u8 *buffer)
198{ 198{
199 int cnt; 199 int cnt;
200 u32 low, high; 200 u32 low, high;
@@ -216,7 +216,7 @@ static u32 __pmac core99_calc_adler(u8 *buffer)
216 return (high << 16) | low; 216 return (high << 16) | low;
217} 217}
218 218
219static u32 __pmac core99_check(u8* datas) 219static u32 core99_check(u8* datas)
220{ 220{
221 struct core99_header* hdr99 = (struct core99_header*)datas; 221 struct core99_header* hdr99 = (struct core99_header*)datas;
222 222
@@ -235,7 +235,7 @@ static u32 __pmac core99_check(u8* datas)
235 return hdr99->generation; 235 return hdr99->generation;
236} 236}
237 237
238static int __pmac sm_erase_bank(int bank) 238static int sm_erase_bank(int bank)
239{ 239{
240 int stat, i; 240 int stat, i;
241 unsigned long timeout; 241 unsigned long timeout;
@@ -267,7 +267,7 @@ static int __pmac sm_erase_bank(int bank)
267 return 0; 267 return 0;
268} 268}
269 269
270static int __pmac sm_write_bank(int bank, u8* datas) 270static int sm_write_bank(int bank, u8* datas)
271{ 271{
272 int i, stat = 0; 272 int i, stat = 0;
273 unsigned long timeout; 273 unsigned long timeout;
@@ -302,7 +302,7 @@ static int __pmac sm_write_bank(int bank, u8* datas)
302 return 0; 302 return 0;
303} 303}
304 304
305static int __pmac amd_erase_bank(int bank) 305static int amd_erase_bank(int bank)
306{ 306{
307 int i, stat = 0; 307 int i, stat = 0;
308 unsigned long timeout; 308 unsigned long timeout;
@@ -349,7 +349,7 @@ static int __pmac amd_erase_bank(int bank)
349 return 0; 349 return 0;
350} 350}
351 351
352static int __pmac amd_write_bank(int bank, u8* datas) 352static int amd_write_bank(int bank, u8* datas)
353{ 353{
354 int i, stat = 0; 354 int i, stat = 0;
355 unsigned long timeout; 355 unsigned long timeout;
@@ -430,7 +430,7 @@ static void __init lookup_partitions(void)
430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]); 430 DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
431} 431}
432 432
433static void __pmac core99_nvram_sync(void) 433static void core99_nvram_sync(void)
434{ 434{
435 struct core99_header* hdr99; 435 struct core99_header* hdr99;
436 unsigned long flags; 436 unsigned long flags;
@@ -554,12 +554,12 @@ void __init pmac_nvram_init(void)
554 lookup_partitions(); 554 lookup_partitions();
555} 555}
556 556
557int __pmac pmac_get_partition(int partition) 557int pmac_get_partition(int partition)
558{ 558{
559 return nvram_partitions[partition]; 559 return nvram_partitions[partition];
560} 560}
561 561
562u8 __pmac pmac_xpram_read(int xpaddr) 562u8 pmac_xpram_read(int xpaddr)
563{ 563{
564 int offset = nvram_partitions[pmac_nvram_XPRAM]; 564 int offset = nvram_partitions[pmac_nvram_XPRAM];
565 565
@@ -569,7 +569,7 @@ u8 __pmac pmac_xpram_read(int xpaddr)
569 return ppc_md.nvram_read_val(xpaddr + offset); 569 return ppc_md.nvram_read_val(xpaddr + offset);
570} 570}
571 571
572void __pmac pmac_xpram_write(int xpaddr, u8 data) 572void pmac_xpram_write(int xpaddr, u8 data)
573{ 573{
574 int offset = nvram_partitions[pmac_nvram_XPRAM]; 574 int offset = nvram_partitions[pmac_nvram_XPRAM];
575 575
diff --git a/arch/ppc/platforms/pmac_pci.c b/arch/ppc/platforms/pmac_pci.c
index 719fb49fe2bc..786295b6ddd0 100644
--- a/arch/ppc/platforms/pmac_pci.c
+++ b/arch/ppc/platforms/pmac_pci.c
@@ -141,7 +141,7 @@ fixup_bus_range(struct device_node *bridge)
141 |(((unsigned long)(off)) & 0xFCUL) \ 141 |(((unsigned long)(off)) & 0xFCUL) \
142 |1UL) 142 |1UL)
143 143
144static void volatile __iomem * __pmac 144static void volatile __iomem *
145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset) 145macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
146{ 146{
147 unsigned int caddr; 147 unsigned int caddr;
@@ -162,7 +162,7 @@ macrisc_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset)
162 return hose->cfg_data + offset; 162 return hose->cfg_data + offset;
163} 163}
164 164
165static int __pmac 165static int
166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 166macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
167 int len, u32 *val) 167 int len, u32 *val)
168{ 168{
@@ -190,7 +190,7 @@ macrisc_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
190 return PCIBIOS_SUCCESSFUL; 190 return PCIBIOS_SUCCESSFUL;
191} 191}
192 192
193static int __pmac 193static int
194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 194macrisc_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
195 int len, u32 val) 195 int len, u32 val)
196{ 196{
@@ -230,7 +230,7 @@ static struct pci_ops macrisc_pci_ops =
230/* 230/*
231 * Verifiy that a specific (bus, dev_fn) exists on chaos 231 * Verifiy that a specific (bus, dev_fn) exists on chaos
232 */ 232 */
233static int __pmac 233static int
234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) 234chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
235{ 235{
236 struct device_node *np; 236 struct device_node *np;
@@ -252,7 +252,7 @@ chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
252 return PCIBIOS_SUCCESSFUL; 252 return PCIBIOS_SUCCESSFUL;
253} 253}
254 254
255static int __pmac 255static int
256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 256chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
257 int len, u32 *val) 257 int len, u32 *val)
258{ 258{
@@ -264,7 +264,7 @@ chaos_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
264 return macrisc_read_config(bus, devfn, offset, len, val); 264 return macrisc_read_config(bus, devfn, offset, len, val);
265} 265}
266 266
267static int __pmac 267static int
268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 268chaos_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
269 int len, u32 val) 269 int len, u32 val)
270{ 270{
@@ -294,7 +294,7 @@ static struct pci_ops chaos_pci_ops =
294 + (((unsigned long)bus) << 16) \ 294 + (((unsigned long)bus) << 16) \
295 + 0x01000000UL) 295 + 0x01000000UL)
296 296
297static void volatile __iomem * __pmac 297static void volatile __iomem *
298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset) 298u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
299{ 299{
300 if (bus == hose->first_busno) { 300 if (bus == hose->first_busno) {
@@ -307,7 +307,7 @@ u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset)
307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset); 307 return hose->cfg_data + U3_HT_CFA1(bus, devfn, offset);
308} 308}
309 309
310static int __pmac 310static int
311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 311u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
312 int len, u32 *val) 312 int len, u32 *val)
313{ 313{
@@ -357,7 +357,7 @@ u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
357 return PCIBIOS_SUCCESSFUL; 357 return PCIBIOS_SUCCESSFUL;
358} 358}
359 359
360static int __pmac 360static int
361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 361u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
362 int len, u32 val) 362 int len, u32 val)
363{ 363{
@@ -575,7 +575,7 @@ pmac_find_bridges(void)
575 * some offset between bus number and domains for now when we 575 * some offset between bus number and domains for now when we
576 * assign all busses should help for now 576 * assign all busses should help for now
577 */ 577 */
578 if (pci_assign_all_busses) 578 if (pci_assign_all_buses)
579 pcibios_assign_bus_offset = 0x10; 579 pcibios_assign_bus_offset = 0x10;
580 580
581#ifdef CONFIG_POWER4 581#ifdef CONFIG_POWER4
@@ -643,7 +643,7 @@ static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
643static int __init 643static int __init
644setup_uninorth(struct pci_controller* hose, struct reg_property* addr) 644setup_uninorth(struct pci_controller* hose, struct reg_property* addr)
645{ 645{
646 pci_assign_all_busses = 1; 646 pci_assign_all_buses = 1;
647 has_uninorth = 1; 647 has_uninorth = 1;
648 hose->ops = &macrisc_pci_ops; 648 hose->ops = &macrisc_pci_ops;
649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); 649 hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000);
@@ -677,7 +677,7 @@ setup_u3_agp(struct pci_controller* hose, struct reg_property* addr)
677{ 677{
678 /* On G5, we move AGP up to high bus number so we don't need 678 /* On G5, we move AGP up to high bus number so we don't need
679 * to reassign bus numbers for HT. If we ever have P2P bridges 679 * to reassign bus numbers for HT. If we ever have P2P bridges
680 * on AGP, we'll have to move pci_assign_all_busses to the 680 * on AGP, we'll have to move pci_assign_all_buses to the
681 * pci_controller structure so we enable it for AGP and not for 681 * pci_controller structure so we enable it for AGP and not for
682 * HT childs. 682 * HT childs.
683 * We hard code the address because of the different size of 683 * We hard code the address because of the different size of
@@ -899,7 +899,7 @@ pmac_pcibios_fixup(void)
899 pcibios_fixup_OF_interrupts(); 899 pcibios_fixup_OF_interrupts();
900} 900}
901 901
902int __pmac 902int
903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial) 903pmac_pci_enable_device_hook(struct pci_dev *dev, int initial)
904{ 904{
905 struct device_node* node; 905 struct device_node* node;
@@ -1096,7 +1096,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pmac_pci_fixup_pciata);
1096 * Disable second function on K2-SATA, it's broken 1096 * Disable second function on K2-SATA, it's broken
1097 * and disable IO BARs on first one 1097 * and disable IO BARs on first one
1098 */ 1098 */
1099void __pmac pmac_pci_fixup_k2_sata(struct pci_dev* dev) 1099void pmac_pci_fixup_k2_sata(struct pci_dev* dev)
1100{ 1100{
1101 int i; 1101 int i;
1102 u16 cmd; 1102 u16 cmd;
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 2ce058895e03..9f2d95ea8564 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -35,6 +35,7 @@
35#include <asm/open_pic.h> 35#include <asm/open_pic.h>
36#include <asm/xmon.h> 36#include <asm/xmon.h>
37#include <asm/pmac_feature.h> 37#include <asm/pmac_feature.h>
38#include <asm/machdep.h>
38 39
39#include "pmac_pic.h" 40#include "pmac_pic.h"
40 41
@@ -53,7 +54,7 @@ struct pmac_irq_hw {
53}; 54};
54 55
55/* Default addresses */ 56/* Default addresses */
56static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = { 57static volatile struct pmac_irq_hw *pmac_irq_hw[4] = {
57 (struct pmac_irq_hw *) 0xf3000020, 58 (struct pmac_irq_hw *) 0xf3000020,
58 (struct pmac_irq_hw *) 0xf3000010, 59 (struct pmac_irq_hw *) 0xf3000010,
59 (struct pmac_irq_hw *) 0xf4000020, 60 (struct pmac_irq_hw *) 0xf4000020,
@@ -64,22 +65,22 @@ static volatile struct pmac_irq_hw *pmac_irq_hw[4] __pmacdata = {
64#define OHARE_LEVEL_MASK 0x1ff00000 65#define OHARE_LEVEL_MASK 0x1ff00000
65#define HEATHROW_LEVEL_MASK 0x1ff00000 66#define HEATHROW_LEVEL_MASK 0x1ff00000
66 67
67static int max_irqs __pmacdata; 68static int max_irqs;
68static int max_real_irqs __pmacdata; 69static int max_real_irqs;
69static u32 level_mask[4] __pmacdata; 70static u32 level_mask[4];
70 71
71static DEFINE_SPINLOCK(pmac_pic_lock __pmacdata); 72static DEFINE_SPINLOCK(pmac_pic_lock);
72 73
73 74
74#define GATWICK_IRQ_POOL_SIZE 10 75#define GATWICK_IRQ_POOL_SIZE 10
75static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE] __pmacdata; 76static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
76 77
77/* 78/*
78 * Mark an irq as "lost". This is only used on the pmac 79 * Mark an irq as "lost". This is only used on the pmac
79 * since it can lose interrupts (see pmac_set_irq_mask). 80 * since it can lose interrupts (see pmac_set_irq_mask).
80 * -- Cort 81 * -- Cort
81 */ 82 */
82void __pmac 83void
83__set_lost(unsigned long irq_nr, int nokick) 84__set_lost(unsigned long irq_nr, int nokick)
84{ 85{
85 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { 86 if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
@@ -89,7 +90,7 @@ __set_lost(unsigned long irq_nr, int nokick)
89 } 90 }
90} 91}
91 92
92static void __pmac 93static void
93pmac_mask_and_ack_irq(unsigned int irq_nr) 94pmac_mask_and_ack_irq(unsigned int irq_nr)
94{ 95{
95 unsigned long bit = 1UL << (irq_nr & 0x1f); 96 unsigned long bit = 1UL << (irq_nr & 0x1f);
@@ -114,7 +115,7 @@ pmac_mask_and_ack_irq(unsigned int irq_nr)
114 spin_unlock_irqrestore(&pmac_pic_lock, flags); 115 spin_unlock_irqrestore(&pmac_pic_lock, flags);
115} 116}
116 117
117static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) 118static void pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
118{ 119{
119 unsigned long bit = 1UL << (irq_nr & 0x1f); 120 unsigned long bit = 1UL << (irq_nr & 0x1f);
120 int i = irq_nr >> 5; 121 int i = irq_nr >> 5;
@@ -147,7 +148,7 @@ static void __pmac pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
147/* When an irq gets requested for the first client, if it's an 148/* When an irq gets requested for the first client, if it's an
148 * edge interrupt, we clear any previous one on the controller 149 * edge interrupt, we clear any previous one on the controller
149 */ 150 */
150static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr) 151static unsigned int pmac_startup_irq(unsigned int irq_nr)
151{ 152{
152 unsigned long bit = 1UL << (irq_nr & 0x1f); 153 unsigned long bit = 1UL << (irq_nr & 0x1f);
153 int i = irq_nr >> 5; 154 int i = irq_nr >> 5;
@@ -160,20 +161,20 @@ static unsigned int __pmac pmac_startup_irq(unsigned int irq_nr)
160 return 0; 161 return 0;
161} 162}
162 163
163static void __pmac pmac_mask_irq(unsigned int irq_nr) 164static void pmac_mask_irq(unsigned int irq_nr)
164{ 165{
165 clear_bit(irq_nr, ppc_cached_irq_mask); 166 clear_bit(irq_nr, ppc_cached_irq_mask);
166 pmac_set_irq_mask(irq_nr, 0); 167 pmac_set_irq_mask(irq_nr, 0);
167 mb(); 168 mb();
168} 169}
169 170
170static void __pmac pmac_unmask_irq(unsigned int irq_nr) 171static void pmac_unmask_irq(unsigned int irq_nr)
171{ 172{
172 set_bit(irq_nr, ppc_cached_irq_mask); 173 set_bit(irq_nr, ppc_cached_irq_mask);
173 pmac_set_irq_mask(irq_nr, 0); 174 pmac_set_irq_mask(irq_nr, 0);
174} 175}
175 176
176static void __pmac pmac_end_irq(unsigned int irq_nr) 177static void pmac_end_irq(unsigned int irq_nr)
177{ 178{
178 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) 179 if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS))
179 && irq_desc[irq_nr].action) { 180 && irq_desc[irq_nr].action) {
diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c
index d6356f480d90..55d2beffe560 100644
--- a/arch/ppc/platforms/pmac_setup.c
+++ b/arch/ppc/platforms/pmac_setup.c
@@ -122,7 +122,7 @@ extern struct smp_ops_t psurge_smp_ops;
122extern struct smp_ops_t core99_smp_ops; 122extern struct smp_ops_t core99_smp_ops;
123#endif /* CONFIG_SMP */ 123#endif /* CONFIG_SMP */
124 124
125static int __pmac 125static int
126pmac_show_cpuinfo(struct seq_file *m) 126pmac_show_cpuinfo(struct seq_file *m)
127{ 127{
128 struct device_node *np; 128 struct device_node *np;
@@ -226,7 +226,7 @@ pmac_show_cpuinfo(struct seq_file *m)
226 return 0; 226 return 0;
227} 227}
228 228
229static int __openfirmware 229static int
230pmac_show_percpuinfo(struct seq_file *m, int i) 230pmac_show_percpuinfo(struct seq_file *m, int i)
231{ 231{
232#ifdef CONFIG_CPU_FREQ_PMAC 232#ifdef CONFIG_CPU_FREQ_PMAC
@@ -330,9 +330,9 @@ pmac_setup_arch(void)
330#ifdef CONFIG_SMP 330#ifdef CONFIG_SMP
331 /* Check for Core99 */ 331 /* Check for Core99 */
332 if (find_devices("uni-n") || find_devices("u3")) 332 if (find_devices("uni-n") || find_devices("u3"))
333 ppc_md.smp_ops = &core99_smp_ops; 333 smp_ops = &core99_smp_ops;
334 else 334 else
335 ppc_md.smp_ops = &psurge_smp_ops; 335 smp_ops = &psurge_smp_ops;
336#endif /* CONFIG_SMP */ 336#endif /* CONFIG_SMP */
337 337
338 pci_create_OF_bus_map(); 338 pci_create_OF_bus_map();
@@ -447,7 +447,7 @@ static int pmac_pm_enter(suspend_state_t state)
447 enable_kernel_fp(); 447 enable_kernel_fp();
448 448
449#ifdef CONFIG_ALTIVEC 449#ifdef CONFIG_ALTIVEC
450 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_ALTIVEC) 450 if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
451 enable_kernel_altivec(); 451 enable_kernel_altivec();
452#endif /* CONFIG_ALTIVEC */ 452#endif /* CONFIG_ALTIVEC */
453 453
@@ -485,7 +485,7 @@ static int pmac_late_init(void)
485late_initcall(pmac_late_init); 485late_initcall(pmac_late_init);
486 486
487/* can't be __init - can be called whenever a disk is first accessed */ 487/* can't be __init - can be called whenever a disk is first accessed */
488void __pmac 488void
489note_bootable_part(dev_t dev, int part, int goodness) 489note_bootable_part(dev_t dev, int part, int goodness)
490{ 490{
491 static int found_boot = 0; 491 static int found_boot = 0;
@@ -511,7 +511,7 @@ note_bootable_part(dev_t dev, int part, int goodness)
511 } 511 }
512} 512}
513 513
514static void __pmac 514static void
515pmac_restart(char *cmd) 515pmac_restart(char *cmd)
516{ 516{
517#ifdef CONFIG_ADB_CUDA 517#ifdef CONFIG_ADB_CUDA
@@ -536,7 +536,7 @@ pmac_restart(char *cmd)
536 } 536 }
537} 537}
538 538
539static void __pmac 539static void
540pmac_power_off(void) 540pmac_power_off(void)
541{ 541{
542#ifdef CONFIG_ADB_CUDA 542#ifdef CONFIG_ADB_CUDA
@@ -561,7 +561,7 @@ pmac_power_off(void)
561 } 561 }
562} 562}
563 563
564static void __pmac 564static void
565pmac_halt(void) 565pmac_halt(void)
566{ 566{
567 pmac_power_off(); 567 pmac_power_off();
@@ -661,7 +661,6 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
661 ppc_md.setup_arch = pmac_setup_arch; 661 ppc_md.setup_arch = pmac_setup_arch;
662 ppc_md.show_cpuinfo = pmac_show_cpuinfo; 662 ppc_md.show_cpuinfo = pmac_show_cpuinfo;
663 ppc_md.show_percpuinfo = pmac_show_percpuinfo; 663 ppc_md.show_percpuinfo = pmac_show_percpuinfo;
664 ppc_md.irq_canonicalize = NULL;
665 ppc_md.init_IRQ = pmac_pic_init; 664 ppc_md.init_IRQ = pmac_pic_init;
666 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */ 665 ppc_md.get_irq = pmac_get_irq; /* Changed later on ... */
667 666
diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S
index 88419c77ac43..22b113d19b24 100644
--- a/arch/ppc/platforms/pmac_sleep.S
+++ b/arch/ppc/platforms/pmac_sleep.S
@@ -387,10 +387,10 @@ turn_on_mmu:
387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ 387#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
388 388
389 .section .data 389 .section .data
390 .balign L1_CACHE_LINE_SIZE 390 .balign L1_CACHE_BYTES
391sleep_storage: 391sleep_storage:
392 .long 0 392 .long 0
393 .balign L1_CACHE_LINE_SIZE, 0 393 .balign L1_CACHE_BYTES, 0
394 394
395#endif /* CONFIG_6xx */ 395#endif /* CONFIG_6xx */
396 .section .text 396 .section .text
diff --git a/arch/ppc/platforms/pmac_smp.c b/arch/ppc/platforms/pmac_smp.c
index 794a23994b82..26ff26238f03 100644
--- a/arch/ppc/platforms/pmac_smp.c
+++ b/arch/ppc/platforms/pmac_smp.c
@@ -186,7 +186,7 @@ static inline void psurge_clr_ipi(int cpu)
186 */ 186 */
187static unsigned long psurge_smp_message[NR_CPUS]; 187static unsigned long psurge_smp_message[NR_CPUS];
188 188
189void __pmac psurge_smp_message_recv(struct pt_regs *regs) 189void psurge_smp_message_recv(struct pt_regs *regs)
190{ 190{
191 int cpu = smp_processor_id(); 191 int cpu = smp_processor_id();
192 int msg; 192 int msg;
@@ -203,14 +203,13 @@ void __pmac psurge_smp_message_recv(struct pt_regs *regs)
203 smp_message_recv(msg, regs); 203 smp_message_recv(msg, regs);
204} 204}
205 205
206irqreturn_t __pmac psurge_primary_intr(int irq, void *d, struct pt_regs *regs) 206irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
207{ 207{
208 psurge_smp_message_recv(regs); 208 psurge_smp_message_recv(regs);
209 return IRQ_HANDLED; 209 return IRQ_HANDLED;
210} 210}
211 211
212static void __pmac smp_psurge_message_pass(int target, int msg, unsigned long data, 212static void smp_psurge_message_pass(int target, int msg)
213 int wait)
214{ 213{
215 int i; 214 int i;
216 215
@@ -629,7 +628,7 @@ void smp_core99_give_timebase(void)
629 628
630 629
631/* PowerSurge-style Macs */ 630/* PowerSurge-style Macs */
632struct smp_ops_t psurge_smp_ops __pmacdata = { 631struct smp_ops_t psurge_smp_ops = {
633 .message_pass = smp_psurge_message_pass, 632 .message_pass = smp_psurge_message_pass,
634 .probe = smp_psurge_probe, 633 .probe = smp_psurge_probe,
635 .kick_cpu = smp_psurge_kick_cpu, 634 .kick_cpu = smp_psurge_kick_cpu,
@@ -639,7 +638,7 @@ struct smp_ops_t psurge_smp_ops __pmacdata = {
639}; 638};
640 639
641/* Core99 Macs (dual G4s) */ 640/* Core99 Macs (dual G4s) */
642struct smp_ops_t core99_smp_ops __pmacdata = { 641struct smp_ops_t core99_smp_ops = {
643 .message_pass = smp_openpic_message_pass, 642 .message_pass = smp_openpic_message_pass,
644 .probe = smp_core99_probe, 643 .probe = smp_core99_probe,
645 .kick_cpu = smp_core99_kick_cpu, 644 .kick_cpu = smp_core99_kick_cpu,
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index efb819f9490d..edb9fcc64790 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -77,7 +77,7 @@ pmac_time_init(void)
77#endif 77#endif
78} 78}
79 79
80unsigned long __pmac 80unsigned long
81pmac_get_rtc_time(void) 81pmac_get_rtc_time(void)
82{ 82{
83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 83#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -118,7 +118,7 @@ pmac_get_rtc_time(void)
118 return 0; 118 return 0;
119} 119}
120 120
121int __pmac 121int
122pmac_set_rtc_time(unsigned long nowtime) 122pmac_set_rtc_time(unsigned long nowtime)
123{ 123{
124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) 124#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
@@ -210,7 +210,7 @@ via_calibrate_decr(void)
210/* 210/*
211 * Reset the time after a sleep. 211 * Reset the time after a sleep.
212 */ 212 */
213static int __pmac 213static int
214time_sleep_notify(struct pmu_sleep_notifier *self, int when) 214time_sleep_notify(struct pmu_sleep_notifier *self, int when)
215{ 215{
216 static unsigned long time_diff; 216 static unsigned long time_diff;
@@ -235,7 +235,7 @@ time_sleep_notify(struct pmu_sleep_notifier *self, int when)
235 return PBOOK_SLEEP_OK; 235 return PBOOK_SLEEP_OK;
236} 236}
237 237
238static struct pmu_sleep_notifier time_sleep_notifier __pmacdata = { 238static struct pmu_sleep_notifier time_sleep_notifier = {
239 time_sleep_notify, SLEEP_LEVEL_MISC, 239 time_sleep_notify, SLEEP_LEVEL_MISC,
240}; 240};
241#endif /* CONFIG_PM */ 241#endif /* CONFIG_PM */
diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c
index e70aae20d6f9..22bd40cfb092 100644
--- a/arch/ppc/platforms/pplus.c
+++ b/arch/ppc/platforms/pplus.c
@@ -646,14 +646,6 @@ static void pplus_power_off(void)
646 pplus_halt(); 646 pplus_halt();
647} 647}
648 648
649static unsigned int pplus_irq_canonicalize(u_int irq)
650{
651 if (irq == 2)
652 return 9;
653 else
654 return irq;
655}
656
657static void __init pplus_init_IRQ(void) 649static void __init pplus_init_IRQ(void)
658{ 650{
659 int i; 651 int i;
@@ -673,10 +665,7 @@ static void __init pplus_init_IRQ(void)
673 ppc_md.get_irq = openpic_get_irq; 665 ppc_md.get_irq = openpic_get_irq;
674 } 666 }
675 667
676 for (i = 0; i < NUM_8259_INTERRUPTS; i++) 668 i8259_init(0, 0);
677 irq_desc[i].handler = &i8259_pic;
678
679 i8259_init(0);
680 669
681 if (ppc_md.progress) 670 if (ppc_md.progress)
682 ppc_md.progress("init_irq: exit", 0); 671 ppc_md.progress("init_irq: exit", 0);
@@ -872,10 +861,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
872 ISA_DMA_THRESHOLD = 0x00ffffff; 861 ISA_DMA_THRESHOLD = 0x00ffffff;
873 DMA_MODE_READ = 0x44; 862 DMA_MODE_READ = 0x44;
874 DMA_MODE_WRITE = 0x48; 863 DMA_MODE_WRITE = 0x48;
864 ppc_do_canonicalize_irqs = 1;
875 865
876 ppc_md.setup_arch = pplus_setup_arch; 866 ppc_md.setup_arch = pplus_setup_arch;
877 ppc_md.show_cpuinfo = pplus_show_cpuinfo; 867 ppc_md.show_cpuinfo = pplus_show_cpuinfo;
878 ppc_md.irq_canonicalize = pplus_irq_canonicalize;
879 ppc_md.init_IRQ = pplus_init_IRQ; 868 ppc_md.init_IRQ = pplus_init_IRQ;
880 /* this gets changed later on if we have an OpenPIC -- Cort */ 869 /* this gets changed later on if we have an OpenPIC -- Cort */
881 ppc_md.get_irq = i8259_irq; 870 ppc_md.get_irq = i8259_irq;
@@ -911,6 +900,6 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
911 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc; 900 ppc_md.kgdb_map_scc = gen550_kgdb_map_scc;
912#endif 901#endif
913#ifdef CONFIG_SMP 902#ifdef CONFIG_SMP
914 ppc_md.smp_ops = &pplus_smp_ops; 903 smp_ops = &pplus_smp_ops;
915#endif /* CONFIG_SMP */ 904#endif /* CONFIG_SMP */
916} 905}
diff --git a/arch/ppc/platforms/prep_pci.c b/arch/ppc/platforms/prep_pci.c
index 4760cb64251d..e50b9996848c 100644
--- a/arch/ppc/platforms/prep_pci.c
+++ b/arch/ppc/platforms/prep_pci.c
@@ -43,7 +43,7 @@ static unsigned long *ProcInfo;
43/* Tables for known hardware */ 43/* Tables for known hardware */
44 44
45/* Motorola PowerStackII - Utah */ 45/* Motorola PowerStackII - Utah */
46static char Utah_pci_IRQ_map[23] __prepdata = 46static char Utah_pci_IRQ_map[23] =
47{ 47{
48 0, /* Slot 0 - unused */ 48 0, /* Slot 0 - unused */
49 0, /* Slot 1 - unused */ 49 0, /* Slot 1 - unused */
@@ -72,7 +72,7 @@ static char Utah_pci_IRQ_map[23] __prepdata =
72 0, /* Slot 22 - unused */ 72 0, /* Slot 22 - unused */
73}; 73};
74 74
75static char Utah_pci_IRQ_routes[] __prepdata = 75static char Utah_pci_IRQ_routes[] =
76{ 76{
77 0, /* Line 0 - Unused */ 77 0, /* Line 0 - Unused */
78 9, /* Line 1 */ 78 9, /* Line 1 */
@@ -84,7 +84,7 @@ static char Utah_pci_IRQ_routes[] __prepdata =
84 84
85/* Motorola PowerStackII - Omaha */ 85/* Motorola PowerStackII - Omaha */
86/* no integrated SCSI or ethernet */ 86/* no integrated SCSI or ethernet */
87static char Omaha_pci_IRQ_map[23] __prepdata = 87static char Omaha_pci_IRQ_map[23] =
88{ 88{
89 0, /* Slot 0 - unused */ 89 0, /* Slot 0 - unused */
90 0, /* Slot 1 - unused */ 90 0, /* Slot 1 - unused */
@@ -111,7 +111,7 @@ static char Omaha_pci_IRQ_map[23] __prepdata =
111 0, 111 0,
112}; 112};
113 113
114static char Omaha_pci_IRQ_routes[] __prepdata = 114static char Omaha_pci_IRQ_routes[] =
115{ 115{
116 0, /* Line 0 - Unused */ 116 0, /* Line 0 - Unused */
117 9, /* Line 1 */ 117 9, /* Line 1 */
@@ -121,7 +121,7 @@ static char Omaha_pci_IRQ_routes[] __prepdata =
121}; 121};
122 122
123/* Motorola PowerStack */ 123/* Motorola PowerStack */
124static char Blackhawk_pci_IRQ_map[19] __prepdata = 124static char Blackhawk_pci_IRQ_map[19] =
125{ 125{
126 0, /* Slot 0 - unused */ 126 0, /* Slot 0 - unused */
127 0, /* Slot 1 - unused */ 127 0, /* Slot 1 - unused */
@@ -144,7 +144,7 @@ static char Blackhawk_pci_IRQ_map[19] __prepdata =
144 3, /* Slot P5 */ 144 3, /* Slot P5 */
145}; 145};
146 146
147static char Blackhawk_pci_IRQ_routes[] __prepdata = 147static char Blackhawk_pci_IRQ_routes[] =
148{ 148{
149 0, /* Line 0 - Unused */ 149 0, /* Line 0 - Unused */
150 9, /* Line 1 */ 150 9, /* Line 1 */
@@ -154,7 +154,7 @@ static char Blackhawk_pci_IRQ_routes[] __prepdata =
154}; 154};
155 155
156/* Motorola Mesquite */ 156/* Motorola Mesquite */
157static char Mesquite_pci_IRQ_map[23] __prepdata = 157static char Mesquite_pci_IRQ_map[23] =
158{ 158{
159 0, /* Slot 0 - unused */ 159 0, /* Slot 0 - unused */
160 0, /* Slot 1 - unused */ 160 0, /* Slot 1 - unused */
@@ -182,7 +182,7 @@ static char Mesquite_pci_IRQ_map[23] __prepdata =
182}; 182};
183 183
184/* Motorola Sitka */ 184/* Motorola Sitka */
185static char Sitka_pci_IRQ_map[21] __prepdata = 185static char Sitka_pci_IRQ_map[21] =
186{ 186{
187 0, /* Slot 0 - unused */ 187 0, /* Slot 0 - unused */
188 0, /* Slot 1 - unused */ 188 0, /* Slot 1 - unused */
@@ -208,7 +208,7 @@ static char Sitka_pci_IRQ_map[21] __prepdata =
208}; 208};
209 209
210/* Motorola MTX */ 210/* Motorola MTX */
211static char MTX_pci_IRQ_map[23] __prepdata = 211static char MTX_pci_IRQ_map[23] =
212{ 212{
213 0, /* Slot 0 - unused */ 213 0, /* Slot 0 - unused */
214 0, /* Slot 1 - unused */ 214 0, /* Slot 1 - unused */
@@ -237,7 +237,7 @@ static char MTX_pci_IRQ_map[23] __prepdata =
237 237
238/* Motorola MTX Plus */ 238/* Motorola MTX Plus */
239/* Secondary bus interrupt routing is not supported yet */ 239/* Secondary bus interrupt routing is not supported yet */
240static char MTXplus_pci_IRQ_map[23] __prepdata = 240static char MTXplus_pci_IRQ_map[23] =
241{ 241{
242 0, /* Slot 0 - unused */ 242 0, /* Slot 0 - unused */
243 0, /* Slot 1 - unused */ 243 0, /* Slot 1 - unused */
@@ -264,13 +264,13 @@ static char MTXplus_pci_IRQ_map[23] __prepdata =
264 0, /* Slot 22 - unused */ 264 0, /* Slot 22 - unused */
265}; 265};
266 266
267static char Raven_pci_IRQ_routes[] __prepdata = 267static char Raven_pci_IRQ_routes[] =
268{ 268{
269 0, /* This is a dummy structure */ 269 0, /* This is a dummy structure */
270}; 270};
271 271
272/* Motorola MVME16xx */ 272/* Motorola MVME16xx */
273static char Genesis_pci_IRQ_map[16] __prepdata = 273static char Genesis_pci_IRQ_map[16] =
274{ 274{
275 0, /* Slot 0 - unused */ 275 0, /* Slot 0 - unused */
276 0, /* Slot 1 - unused */ 276 0, /* Slot 1 - unused */
@@ -290,7 +290,7 @@ static char Genesis_pci_IRQ_map[16] __prepdata =
290 0, /* Slot 15 - unused */ 290 0, /* Slot 15 - unused */
291}; 291};
292 292
293static char Genesis_pci_IRQ_routes[] __prepdata = 293static char Genesis_pci_IRQ_routes[] =
294{ 294{
295 0, /* Line 0 - Unused */ 295 0, /* Line 0 - Unused */
296 10, /* Line 1 */ 296 10, /* Line 1 */
@@ -299,7 +299,7 @@ static char Genesis_pci_IRQ_routes[] __prepdata =
299 15 /* Line 4 */ 299 15 /* Line 4 */
300}; 300};
301 301
302static char Genesis2_pci_IRQ_map[23] __prepdata = 302static char Genesis2_pci_IRQ_map[23] =
303{ 303{
304 0, /* Slot 0 - unused */ 304 0, /* Slot 0 - unused */
305 0, /* Slot 1 - unused */ 305 0, /* Slot 1 - unused */
@@ -327,7 +327,7 @@ static char Genesis2_pci_IRQ_map[23] __prepdata =
327}; 327};
328 328
329/* Motorola Series-E */ 329/* Motorola Series-E */
330static char Comet_pci_IRQ_map[23] __prepdata = 330static char Comet_pci_IRQ_map[23] =
331{ 331{
332 0, /* Slot 0 - unused */ 332 0, /* Slot 0 - unused */
333 0, /* Slot 1 - unused */ 333 0, /* Slot 1 - unused */
@@ -354,7 +354,7 @@ static char Comet_pci_IRQ_map[23] __prepdata =
354 0, 354 0,
355}; 355};
356 356
357static char Comet_pci_IRQ_routes[] __prepdata = 357static char Comet_pci_IRQ_routes[] =
358{ 358{
359 0, /* Line 0 - Unused */ 359 0, /* Line 0 - Unused */
360 10, /* Line 1 */ 360 10, /* Line 1 */
@@ -364,7 +364,7 @@ static char Comet_pci_IRQ_routes[] __prepdata =
364}; 364};
365 365
366/* Motorola Series-EX */ 366/* Motorola Series-EX */
367static char Comet2_pci_IRQ_map[23] __prepdata = 367static char Comet2_pci_IRQ_map[23] =
368{ 368{
369 0, /* Slot 0 - unused */ 369 0, /* Slot 0 - unused */
370 0, /* Slot 1 - unused */ 370 0, /* Slot 1 - unused */
@@ -391,7 +391,7 @@ static char Comet2_pci_IRQ_map[23] __prepdata =
391 0, 391 0,
392}; 392};
393 393
394static char Comet2_pci_IRQ_routes[] __prepdata = 394static char Comet2_pci_IRQ_routes[] =
395{ 395{
396 0, /* Line 0 - Unused */ 396 0, /* Line 0 - Unused */
397 10, /* Line 1 */ 397 10, /* Line 1 */
@@ -405,7 +405,7 @@ static char Comet2_pci_IRQ_routes[] __prepdata =
405 * This is actually based on the Carolina motherboard 405 * This is actually based on the Carolina motherboard
406 * -- Cort 406 * -- Cort
407 */ 407 */
408static char ibm8xx_pci_IRQ_map[23] __prepdata = { 408static char ibm8xx_pci_IRQ_map[23] = {
409 0, /* Slot 0 - unused */ 409 0, /* Slot 0 - unused */
410 0, /* Slot 1 - unused */ 410 0, /* Slot 1 - unused */
411 0, /* Slot 2 - unused */ 411 0, /* Slot 2 - unused */
@@ -431,7 +431,7 @@ static char ibm8xx_pci_IRQ_map[23] __prepdata = {
431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 431 2, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
432}; 432};
433 433
434static char ibm8xx_pci_IRQ_routes[] __prepdata = { 434static char ibm8xx_pci_IRQ_routes[] = {
435 0, /* Line 0 - unused */ 435 0, /* Line 0 - unused */
436 15, /* Line 1 */ 436 15, /* Line 1 */
437 15, /* Line 2 */ 437 15, /* Line 2 */
@@ -443,7 +443,7 @@ static char ibm8xx_pci_IRQ_routes[] __prepdata = {
443 * a 6015 ibm board 443 * a 6015 ibm board
444 * -- Cort 444 * -- Cort
445 */ 445 */
446static char ibm6015_pci_IRQ_map[23] __prepdata = { 446static char ibm6015_pci_IRQ_map[23] = {
447 0, /* Slot 0 - unused */ 447 0, /* Slot 0 - unused */
448 0, /* Slot 1 - unused */ 448 0, /* Slot 1 - unused */
449 0, /* Slot 2 - unused */ 449 0, /* Slot 2 - unused */
@@ -469,7 +469,7 @@ static char ibm6015_pci_IRQ_map[23] __prepdata = {
469 2, /* Slot 22 - */ 469 2, /* Slot 22 - */
470}; 470};
471 471
472static char ibm6015_pci_IRQ_routes[] __prepdata = { 472static char ibm6015_pci_IRQ_routes[] = {
473 0, /* Line 0 - unused */ 473 0, /* Line 0 - unused */
474 13, /* Line 1 */ 474 13, /* Line 1 */
475 15, /* Line 2 */ 475 15, /* Line 2 */
@@ -479,7 +479,7 @@ static char ibm6015_pci_IRQ_routes[] __prepdata = {
479 479
480 480
481/* IBM Nobis and Thinkpad 850 */ 481/* IBM Nobis and Thinkpad 850 */
482static char Nobis_pci_IRQ_map[23] __prepdata ={ 482static char Nobis_pci_IRQ_map[23] ={
483 0, /* Slot 0 - unused */ 483 0, /* Slot 0 - unused */
484 0, /* Slot 1 - unused */ 484 0, /* Slot 1 - unused */
485 0, /* Slot 2 - unused */ 485 0, /* Slot 2 - unused */
@@ -498,7 +498,7 @@ static char Nobis_pci_IRQ_map[23] __prepdata ={
498 0, /* Slot 15 - unused */ 498 0, /* Slot 15 - unused */
499}; 499};
500 500
501static char Nobis_pci_IRQ_routes[] __prepdata = { 501static char Nobis_pci_IRQ_routes[] = {
502 0, /* Line 0 - Unused */ 502 0, /* Line 0 - Unused */
503 13, /* Line 1 */ 503 13, /* Line 1 */
504 13, /* Line 2 */ 504 13, /* Line 2 */
@@ -510,7 +510,7 @@ static char Nobis_pci_IRQ_routes[] __prepdata = {
510 * IBM RS/6000 43p/140 -- paulus 510 * IBM RS/6000 43p/140 -- paulus
511 * XXX we should get all this from the residual data 511 * XXX we should get all this from the residual data
512 */ 512 */
513static char ibm43p_pci_IRQ_map[23] __prepdata = { 513static char ibm43p_pci_IRQ_map[23] = {
514 0, /* Slot 0 - unused */ 514 0, /* Slot 0 - unused */
515 0, /* Slot 1 - unused */ 515 0, /* Slot 1 - unused */
516 0, /* Slot 2 - unused */ 516 0, /* Slot 2 - unused */
@@ -536,7 +536,7 @@ static char ibm43p_pci_IRQ_map[23] __prepdata = {
536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */ 536 1, /* Slot 22 - PCI slot 1 PCIINTx# (See below) */
537}; 537};
538 538
539static char ibm43p_pci_IRQ_routes[] __prepdata = { 539static char ibm43p_pci_IRQ_routes[] = {
540 0, /* Line 0 - unused */ 540 0, /* Line 0 - unused */
541 15, /* Line 1 */ 541 15, /* Line 1 */
542 15, /* Line 2 */ 542 15, /* Line 2 */
@@ -559,7 +559,7 @@ struct powerplus_irq_list
559 * are routed to OpenPIC inputs 5-8. These values are offset by 559 * are routed to OpenPIC inputs 5-8. These values are offset by
560 * 16 in the table to reflect the Linux kernel interrupt value. 560 * 16 in the table to reflect the Linux kernel interrupt value.
561 */ 561 */
562struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata = 562struct powerplus_irq_list Powerplus_pci_IRQ_list =
563{ 563{
564 {25, 26, 27, 28}, 564 {25, 26, 27, 28},
565 {21, 22, 23, 24} 565 {21, 22, 23, 24}
@@ -572,7 +572,7 @@ struct powerplus_irq_list Powerplus_pci_IRQ_list __prepdata =
572 * are routed to OpenPIC inputs 12-15. These values are offset by 572 * are routed to OpenPIC inputs 12-15. These values are offset by
573 * 16 in the table to reflect the Linux kernel interrupt value. 573 * 16 in the table to reflect the Linux kernel interrupt value.
574 */ 574 */
575struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata = 575struct powerplus_irq_list Mesquite_pci_IRQ_list =
576{ 576{
577 {24, 25, 26, 27}, 577 {24, 25, 26, 27},
578 {28, 29, 30, 31} 578 {28, 29, 30, 31}
@@ -582,7 +582,7 @@ struct powerplus_irq_list Mesquite_pci_IRQ_list __prepdata =
582 * This table represents the standard PCI swizzle defined in the 582 * This table represents the standard PCI swizzle defined in the
583 * PCI bus specification. 583 * PCI bus specification.
584 */ 584 */
585static unsigned char prep_pci_intpins[4][4] __prepdata = 585static unsigned char prep_pci_intpins[4][4] =
586{ 586{
587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */ 587 { 1, 2, 3, 4}, /* Buses 0, 4, 8, ... */
588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */ 588 { 2, 3, 4, 1}, /* Buses 1, 5, 9, ... */
@@ -622,7 +622,7 @@ static unsigned char prep_pci_intpins[4][4] __prepdata =
622#define MIN_DEVNR 11 622#define MIN_DEVNR 11
623#define MAX_DEVNR 22 623#define MAX_DEVNR 22
624 624
625static int __prep 625static int
626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset, 626prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
627 int len, u32 *val) 627 int len, u32 *val)
628{ 628{
@@ -652,7 +652,7 @@ prep_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
652 return PCIBIOS_SUCCESSFUL; 652 return PCIBIOS_SUCCESSFUL;
653} 653}
654 654
655static int __prep 655static int
656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset, 656prep_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
657 int len, u32 val) 657 int len, u32 val)
658{ 658{
@@ -804,7 +804,7 @@ struct mot_info {
804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */ 804 void (*map_non0_bus)(struct pci_dev *); /* For boards with more than bus 0 devices. */
805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */ 805 struct powerplus_irq_list *pci_irq_list; /* List of PCI MPIC inputs */
806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */ 806 unsigned char secondary_bridge_devfn; /* devfn of secondary bus transparent bridge */
807} mot_info[] __prepdata = { 807} mot_info[] = {
808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF}, 808 {0x300, 0x00, 0x00, "MVME 2400", Genesis2_pci_IRQ_map, Raven_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0xFF},
809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00}, 809 {0x010, 0x00, 0x00, "Genesis", Genesis_pci_IRQ_map, Genesis_pci_IRQ_routes, Powerplus_Map_Non0, &Powerplus_pci_IRQ_list, 0x00},
810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00}, 810 {0x020, 0x00, 0x00, "Powerstack (Series E)", Comet_pci_IRQ_map, Comet_pci_IRQ_routes, NULL, NULL, 0x00},
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index bc926be95472..067d7d53b81e 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -89,9 +89,6 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
89#define cached_21 (((char *)(ppc_cached_irq_mask))[3]) 89#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2]) 90#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
91 91
92/* for the mac fs */
93dev_t boot_dev;
94
95#ifdef CONFIG_SOUND_CS4232 92#ifdef CONFIG_SOUND_CS4232
96long ppc_cs4232_dma, ppc_cs4232_dma2; 93long ppc_cs4232_dma, ppc_cs4232_dma2;
97#endif 94#endif
@@ -173,7 +170,7 @@ prep_carolina_enable_l2(void)
173} 170}
174 171
175/* cpuinfo code common to all IBM PReP */ 172/* cpuinfo code common to all IBM PReP */
176static void __prep 173static void
177prep_ibm_cpuinfo(struct seq_file *m) 174prep_ibm_cpuinfo(struct seq_file *m)
178{ 175{
179 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 176 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -209,14 +206,14 @@ prep_ibm_cpuinfo(struct seq_file *m)
209 } 206 }
210} 207}
211 208
212static int __prep 209static int
213prep_gen_cpuinfo(struct seq_file *m) 210prep_gen_cpuinfo(struct seq_file *m)
214{ 211{
215 prep_ibm_cpuinfo(m); 212 prep_ibm_cpuinfo(m);
216 return 0; 213 return 0;
217} 214}
218 215
219static int __prep 216static int
220prep_sandalfoot_cpuinfo(struct seq_file *m) 217prep_sandalfoot_cpuinfo(struct seq_file *m)
221{ 218{
222 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 219 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -243,7 +240,7 @@ prep_sandalfoot_cpuinfo(struct seq_file *m)
243 return 0; 240 return 0;
244} 241}
245 242
246static int __prep 243static int
247prep_thinkpad_cpuinfo(struct seq_file *m) 244prep_thinkpad_cpuinfo(struct seq_file *m)
248{ 245{
249 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 246 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -314,7 +311,7 @@ prep_thinkpad_cpuinfo(struct seq_file *m)
314 return 0; 311 return 0;
315} 312}
316 313
317static int __prep 314static int
318prep_carolina_cpuinfo(struct seq_file *m) 315prep_carolina_cpuinfo(struct seq_file *m)
319{ 316{
320 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT); 317 unsigned int equip_reg = inb(PREP_IBM_EQUIPMENT);
@@ -350,7 +347,7 @@ prep_carolina_cpuinfo(struct seq_file *m)
350 return 0; 347 return 0;
351} 348}
352 349
353static int __prep 350static int
354prep_tiger1_cpuinfo(struct seq_file *m) 351prep_tiger1_cpuinfo(struct seq_file *m)
355{ 352{
356 unsigned int l2_reg = inb(PREP_IBM_L2INFO); 353 unsigned int l2_reg = inb(PREP_IBM_L2INFO);
@@ -393,7 +390,7 @@ prep_tiger1_cpuinfo(struct seq_file *m)
393 390
394 391
395/* Used by all Motorola PReP */ 392/* Used by all Motorola PReP */
396static int __prep 393static int
397prep_mot_cpuinfo(struct seq_file *m) 394prep_mot_cpuinfo(struct seq_file *m)
398{ 395{
399 unsigned int cachew = *((unsigned char *)CACHECRBA); 396 unsigned int cachew = *((unsigned char *)CACHECRBA);
@@ -454,7 +451,7 @@ no_l2:
454 return 0; 451 return 0;
455} 452}
456 453
457static void __prep 454static void
458prep_restart(char *cmd) 455prep_restart(char *cmd)
459{ 456{
460#define PREP_SP92 0x92 /* Special Port 92 */ 457#define PREP_SP92 0x92 /* Special Port 92 */
@@ -473,7 +470,7 @@ prep_restart(char *cmd)
473#undef PREP_SP92 470#undef PREP_SP92
474} 471}
475 472
476static void __prep 473static void
477prep_halt(void) 474prep_halt(void)
478{ 475{
479 local_irq_disable(); /* no interrupts */ 476 local_irq_disable(); /* no interrupts */
@@ -488,7 +485,7 @@ prep_halt(void)
488/* Carrera is the power manager in the Thinkpads. Unfortunately not much is 485/* Carrera is the power manager in the Thinkpads. Unfortunately not much is
489 * known about it, so we can't power down. 486 * known about it, so we can't power down.
490 */ 487 */
491static void __prep 488static void
492prep_carrera_poweroff(void) 489prep_carrera_poweroff(void)
493{ 490{
494 prep_halt(); 491 prep_halt();
@@ -501,7 +498,7 @@ prep_carrera_poweroff(void)
501 * somewhat in the IBM Carolina Technical Specification. 498 * somewhat in the IBM Carolina Technical Specification.
502 * -Hollis 499 * -Hollis
503 */ 500 */
504static void __prep 501static void
505utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value) 502utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
506{ 503{
507 /* 504 /*
@@ -539,7 +536,7 @@ utah_sig87c750_setbit(unsigned int bytenum, unsigned int bitnum, int value)
539 udelay(100); /* important: let controller recover */ 536 udelay(100); /* important: let controller recover */
540} 537}
541 538
542static void __prep 539static void
543prep_sig750_poweroff(void) 540prep_sig750_poweroff(void)
544{ 541{
545 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */ 542 /* tweak the power manager found in most IBM PRePs (except Thinkpads) */
@@ -554,7 +551,7 @@ prep_sig750_poweroff(void)
554 /* not reached */ 551 /* not reached */
555} 552}
556 553
557static int __prep 554static int
558prep_show_percpuinfo(struct seq_file *m, int i) 555prep_show_percpuinfo(struct seq_file *m, int i)
559{ 556{
560 /* PREP's without residual data will give incorrect values here */ 557 /* PREP's without residual data will give incorrect values here */
@@ -700,12 +697,12 @@ prep_set_bat(void)
700/* 697/*
701 * IBM 3-digit status LED 698 * IBM 3-digit status LED
702 */ 699 */
703static unsigned int ibm_statusled_base __prepdata; 700static unsigned int ibm_statusled_base;
704 701
705static void __prep 702static void
706ibm_statusled_progress(char *s, unsigned short hex); 703ibm_statusled_progress(char *s, unsigned short hex);
707 704
708static int __prep 705static int
709ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2, 706ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
710 void * dummy3) 707 void * dummy3)
711{ 708{
@@ -713,13 +710,13 @@ ibm_statusled_panic(struct notifier_block *dummy1, unsigned long dummy2,
713 return NOTIFY_DONE; 710 return NOTIFY_DONE;
714} 711}
715 712
716static struct notifier_block ibm_statusled_block __prepdata = { 713static struct notifier_block ibm_statusled_block = {
717 ibm_statusled_panic, 714 ibm_statusled_panic,
718 NULL, 715 NULL,
719 INT_MAX /* try to do it first */ 716 INT_MAX /* try to do it first */
720}; 717};
721 718
722static void __prep 719static void
723ibm_statusled_progress(char *s, unsigned short hex) 720ibm_statusled_progress(char *s, unsigned short hex)
724{ 721{
725 static int notifier_installed; 722 static int notifier_installed;
@@ -945,19 +942,6 @@ prep_calibrate_decr(void)
945 todc_calibrate_decr(); 942 todc_calibrate_decr();
946} 943}
947 944
948static unsigned int __prep
949prep_irq_canonicalize(u_int irq)
950{
951 if (irq == 2)
952 {
953 return 9;
954 }
955 else
956 {
957 return irq;
958 }
959}
960
961static void __init 945static void __init
962prep_init_IRQ(void) 946prep_init_IRQ(void)
963{ 947{
@@ -970,11 +954,9 @@ prep_init_IRQ(void)
970 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade", 954 openpic_hookup_cascade(NUM_8259_INTERRUPTS, "82c59 cascade",
971 i8259_irq); 955 i8259_irq);
972 } 956 }
973 for ( i = 0 ; i < NUM_8259_INTERRUPTS ; i++ )
974 irq_desc[i].handler = &i8259_pic;
975 957
976 if (have_residual_data) { 958 if (have_residual_data) {
977 i8259_init(residual_isapic_addr()); 959 i8259_init(residual_isapic_addr(), 0);
978 return; 960 return;
979 } 961 }
980 962
@@ -985,18 +967,18 @@ prep_init_IRQ(void)
985 if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA) 967 if (((pci_viddid & 0xffff) == PCI_VENDOR_ID_MOTOROLA)
986 && ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN) 968 && ((pci_did == PCI_DEVICE_ID_MOTOROLA_RAVEN)
987 || (pci_did == PCI_DEVICE_ID_MOTOROLA_HAWK))) 969 || (pci_did == PCI_DEVICE_ID_MOTOROLA_HAWK)))
988 i8259_init(0); 970 i8259_init(0, 0);
989 else 971 else
990 /* PCI interrupt ack address given in section 6.1.8 of the 972 /* PCI interrupt ack address given in section 6.1.8 of the
991 * PReP specification. */ 973 * PReP specification. */
992 i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR); 974 i8259_init(MPC10X_MAPA_PCI_INTACK_ADDR, 0);
993} 975}
994 976
995#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE) 977#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
996/* 978/*
997 * IDE stuff. 979 * IDE stuff.
998 */ 980 */
999static int __prep 981static int
1000prep_ide_default_irq(unsigned long base) 982prep_ide_default_irq(unsigned long base)
1001{ 983{
1002 switch (base) { 984 switch (base) {
@@ -1010,7 +992,7 @@ prep_ide_default_irq(unsigned long base)
1010 } 992 }
1011} 993}
1012 994
1013static unsigned long __prep 995static unsigned long
1014prep_ide_default_io_base(int index) 996prep_ide_default_io_base(int index)
1015{ 997{
1016 switch (index) { 998 switch (index) {
@@ -1055,7 +1037,7 @@ smp_prep_setup_cpu(int cpu_nr)
1055 do_openpic_setup_cpu(); 1037 do_openpic_setup_cpu();
1056} 1038}
1057 1039
1058static struct smp_ops_t prep_smp_ops __prepdata = { 1040static struct smp_ops_t prep_smp_ops = {
1059 smp_openpic_message_pass, 1041 smp_openpic_message_pass,
1060 smp_prep_probe, 1042 smp_prep_probe,
1061 smp_prep_kick_cpu, 1043 smp_prep_kick_cpu,
@@ -1113,6 +1095,7 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1113 ISA_DMA_THRESHOLD = 0x00ffffff; 1095 ISA_DMA_THRESHOLD = 0x00ffffff;
1114 DMA_MODE_READ = 0x44; 1096 DMA_MODE_READ = 0x44;
1115 DMA_MODE_WRITE = 0x48; 1097 DMA_MODE_WRITE = 0x48;
1098 ppc_do_canonicalize_irqs = 1;
1116 1099
1117 /* figure out what kind of prep workstation we are */ 1100 /* figure out what kind of prep workstation we are */
1118 if (have_residual_data) { 1101 if (have_residual_data) {
@@ -1139,7 +1122,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1139 ppc_md.setup_arch = prep_setup_arch; 1122 ppc_md.setup_arch = prep_setup_arch;
1140 ppc_md.show_percpuinfo = prep_show_percpuinfo; 1123 ppc_md.show_percpuinfo = prep_show_percpuinfo;
1141 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */ 1124 ppc_md.show_cpuinfo = NULL; /* set in prep_setup_arch() */
1142 ppc_md.irq_canonicalize = prep_irq_canonicalize;
1143 ppc_md.init_IRQ = prep_init_IRQ; 1125 ppc_md.init_IRQ = prep_init_IRQ;
1144 /* this gets changed later on if we have an OpenPIC -- Cort */ 1126 /* this gets changed later on if we have an OpenPIC -- Cort */
1145 ppc_md.get_irq = i8259_irq; 1127 ppc_md.get_irq = i8259_irq;
@@ -1176,6 +1158,6 @@ prep_init(unsigned long r3, unsigned long r4, unsigned long r5,
1176#endif 1158#endif
1177 1159
1178#ifdef CONFIG_SMP 1160#ifdef CONFIG_SMP
1179 ppc_md.smp_ops = &prep_smp_ops; 1161 smp_ops = &prep_smp_ops;
1180#endif /* CONFIG_SMP */ 1162#endif /* CONFIG_SMP */
1181} 1163}
diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c
index 0376c8cff5d1..6f97911c330d 100644
--- a/arch/ppc/platforms/radstone_ppc7d.c
+++ b/arch/ppc/platforms/radstone_ppc7d.c
@@ -514,13 +514,9 @@ static void __init ppc7d_init_irq(void)
514 int irq; 514 int irq;
515 515
516 pr_debug("%s\n", __FUNCTION__); 516 pr_debug("%s\n", __FUNCTION__);
517 i8259_init(0); 517 i8259_init(0, 0);
518 mv64360_init_irq(); 518 mv64360_init_irq();
519 519
520 /* IRQ 0..15 are handled by the cascaded 8259's of the Ali1535 */
521 for (irq = 0; irq < 16; irq++) {
522 irq_desc[irq].handler = &i8259_pic;
523 }
524 /* IRQs 5,6,9,10,11,14,15 are level sensitive */ 520 /* IRQs 5,6,9,10,11,14,15 are level sensitive */
525 irq_desc[5].status |= IRQ_LEVEL; 521 irq_desc[5].status |= IRQ_LEVEL;
526 irq_desc[6].status |= IRQ_LEVEL; 522 irq_desc[6].status |= IRQ_LEVEL;
@@ -1183,18 +1179,18 @@ static void __init ppc7d_setup_arch(void)
1183 ROOT_DEV = Root_HDA1; 1179 ROOT_DEV = Root_HDA1;
1184#endif 1180#endif
1185 1181
1186 if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_SPEC7450) || 1182 if ((cur_cpu_spec->cpu_features & CPU_FTR_SPEC7450) ||
1187 (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR)) 1183 (cur_cpu_spec->cpu_features & CPU_FTR_L3CR))
1188 /* 745x is different. We only want to pass along enable. */ 1184 /* 745x is different. We only want to pass along enable. */
1189 _set_L2CR(L2CR_L2E); 1185 _set_L2CR(L2CR_L2E);
1190 else if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L2CR) 1186 else if (cur_cpu_spec->cpu_features & CPU_FTR_L2CR)
1191 /* All modules have 1MB of L2. We also assume that an 1187 /* All modules have 1MB of L2. We also assume that an
1192 * L2 divisor of 3 will work. 1188 * L2 divisor of 3 will work.
1193 */ 1189 */
1194 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3 1190 _set_L2CR(L2CR_L2E | L2CR_L2SIZ_1MB | L2CR_L2CLK_DIV3
1195 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF); 1191 | L2CR_L2RAM_PIPE | L2CR_L2OH_1_0 | L2CR_L2DF);
1196 1192
1197 if (cur_cpu_spec[0]->cpu_features & CPU_FTR_L3CR) 1193 if (cur_cpu_spec->cpu_features & CPU_FTR_L3CR)
1198 /* No L3 cache */ 1194 /* No L3 cache */
1199 _set_L3CR(0); 1195 _set_L3CR(0);
1200 1196
@@ -1424,6 +1420,7 @@ void __init platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
1424 ppc_md.setup_arch = ppc7d_setup_arch; 1420 ppc_md.setup_arch = ppc7d_setup_arch;
1425 ppc_md.init = ppc7d_init2; 1421 ppc_md.init = ppc7d_init2;
1426 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo; 1422 ppc_md.show_cpuinfo = ppc7d_show_cpuinfo;
1423 /* XXX this is broken... */
1427 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize; 1424 ppc_md.irq_canonicalize = ppc7d_irq_canonicalize;
1428 ppc_md.init_IRQ = ppc7d_init_irq; 1425 ppc_md.init_IRQ = ppc7d_init_irq;
1429 ppc_md.get_irq = ppc7d_get_irq; 1426 ppc_md.get_irq = ppc7d_get_irq;
diff --git a/arch/ppc/platforms/residual.c b/arch/ppc/platforms/residual.c
index 0f84ca603612..c9911601cfdf 100644
--- a/arch/ppc/platforms/residual.c
+++ b/arch/ppc/platforms/residual.c
@@ -47,7 +47,7 @@
47#include <asm/ide.h> 47#include <asm/ide.h>
48 48
49 49
50unsigned char __res[sizeof(RESIDUAL)] __prepdata = {0,}; 50unsigned char __res[sizeof(RESIDUAL)] = {0,};
51RESIDUAL *res = (RESIDUAL *)&__res; 51RESIDUAL *res = (RESIDUAL *)&__res;
52 52
53char * PnP_BASE_TYPES[] __initdata = { 53char * PnP_BASE_TYPES[] __initdata = {
diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c
index 5232283c1974..9eeed3572309 100644
--- a/arch/ppc/platforms/sandpoint.c
+++ b/arch/ppc/platforms/sandpoint.c
@@ -494,27 +494,10 @@ sandpoint_init_IRQ(void)
494 i8259_irq); 494 i8259_irq);
495 495
496 /* 496 /*
497 * openpic_init() has set up irq_desc[16-31] to be openpic
498 * interrupts. We need to set irq_desc[0-15] to be i8259
499 * interrupts.
500 */
501 for(i=0; i < NUM_8259_INTERRUPTS; i++)
502 irq_desc[i].handler = &i8259_pic;
503
504 /*
505 * The EPIC allows for a read in the range of 0xFEF00000 -> 497 * The EPIC allows for a read in the range of 0xFEF00000 ->
506 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction. 498 * 0xFEFFFFFF to generate a PCI interrupt-acknowledge transaction.
507 */ 499 */
508 i8259_init(0xfef00000); 500 i8259_init(0xfef00000, 0);
509}
510
511static u32
512sandpoint_irq_canonicalize(u32 irq)
513{
514 if (irq == 2)
515 return 9;
516 else
517 return irq;
518} 501}
519 502
520static unsigned long __init 503static unsigned long __init
@@ -727,10 +710,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
727 ISA_DMA_THRESHOLD = 0x00ffffff; 710 ISA_DMA_THRESHOLD = 0x00ffffff;
728 DMA_MODE_READ = 0x44; 711 DMA_MODE_READ = 0x44;
729 DMA_MODE_WRITE = 0x48; 712 DMA_MODE_WRITE = 0x48;
713 ppc_do_canonicalize_irqs = 1;
730 714
731 ppc_md.setup_arch = sandpoint_setup_arch; 715 ppc_md.setup_arch = sandpoint_setup_arch;
732 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo; 716 ppc_md.show_cpuinfo = sandpoint_show_cpuinfo;
733 ppc_md.irq_canonicalize = sandpoint_irq_canonicalize;
734 ppc_md.init_IRQ = sandpoint_init_IRQ; 717 ppc_md.init_IRQ = sandpoint_init_IRQ;
735 ppc_md.get_irq = openpic_get_irq; 718 ppc_md.get_irq = openpic_get_irq;
736 719
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index b8d08f33f7ee..b4ef15b45c4a 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -31,52 +31,49 @@ obj-$(CONFIG_GEN_RTC) += todc_time.o
31obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o 31obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
32obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o 32obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
33ifeq ($(CONFIG_40x),y) 33ifeq ($(CONFIG_40x),y)
34obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o ppc405_pci.o 34obj-$(CONFIG_PCI) += pci_auto.o ppc405_pci.o
35endif 35endif
36endif 36endif
37obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \ 37obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \
38 ppc_sys.o mpc8xx_devices.o mpc8xx_sys.o 38 ppc_sys.o mpc8xx_devices.o mpc8xx_sys.o
39ifeq ($(CONFIG_8xx),y) 39obj-$(CONFIG_PCI_QSPAN) += qspan_pci.o
40obj-$(CONFIG_PCI) += qspan_pci.o i8259.o 40obj-$(CONFIG_PPC_OF) += prom_init.o prom.o
41endif 41obj-$(CONFIG_PPC_PMAC) += open_pic.o
42obj-$(CONFIG_PPC_OF) += prom_init.o prom.o of_device.o
43obj-$(CONFIG_PPC_PMAC) += open_pic.o indirect_pci.o
44obj-$(CONFIG_POWER4) += open_pic2.o 42obj-$(CONFIG_POWER4) += open_pic2.o
45obj-$(CONFIG_PPC_CHRP) += open_pic.o indirect_pci.o i8259.o 43obj-$(CONFIG_PPC_CHRP) += open_pic.o
46obj-$(CONFIG_PPC_PREP) += open_pic.o indirect_pci.o i8259.o todc_time.o 44obj-$(CONFIG_PPC_PREP) += open_pic.o todc_time.o
47obj-$(CONFIG_BAMBOO) += indirect_pci.o pci_auto.o todc_time.o 45obj-$(CONFIG_BAMBOO) += pci_auto.o todc_time.o
48obj-$(CONFIG_CPCI690) += todc_time.o pci_auto.o 46obj-$(CONFIG_CPCI690) += todc_time.o pci_auto.o
49obj-$(CONFIG_EBONY) += indirect_pci.o pci_auto.o todc_time.o 47obj-$(CONFIG_EBONY) += pci_auto.o todc_time.o
50obj-$(CONFIG_EV64260) += todc_time.o pci_auto.o 48obj-$(CONFIG_EV64260) += todc_time.o pci_auto.o
51obj-$(CONFIG_CHESTNUT) += mv64360_pic.o pci_auto.o 49obj-$(CONFIG_CHESTNUT) += mv64360_pic.o pci_auto.o
52obj-$(CONFIG_GEMINI) += open_pic.o indirect_pci.o 50obj-$(CONFIG_GEMINI) += open_pic.o
53obj-$(CONFIG_GT64260) += gt64260_pic.o 51obj-$(CONFIG_GT64260) += gt64260_pic.o
54obj-$(CONFIG_LOPEC) += i8259.o pci_auto.o todc_time.o 52obj-$(CONFIG_LOPEC) += pci_auto.o todc_time.o
55obj-$(CONFIG_HDPU) += pci_auto.o 53obj-$(CONFIG_HDPU) += pci_auto.o
56obj-$(CONFIG_LUAN) += indirect_pci.o pci_auto.o todc_time.o 54obj-$(CONFIG_LUAN) += pci_auto.o todc_time.o
57obj-$(CONFIG_KATANA) += pci_auto.o 55obj-$(CONFIG_KATANA) += pci_auto.o
58obj-$(CONFIG_MV64360) += mv64360_pic.o 56obj-$(CONFIG_MV64360) += mv64360_pic.o
59obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_win.o indirect_pci.o 57obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_win.o
60obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o indirect_pci.o \ 58obj-$(CONFIG_MVME5100) += open_pic.o todc_time.o \
61 pci_auto.o hawk_common.o 59 pci_auto.o hawk_common.o
62obj-$(CONFIG_MVME5100_IPMC761_PRESENT) += i8259.o 60obj-$(CONFIG_OCOTEA) += pci_auto.o todc_time.o
63obj-$(CONFIG_OCOTEA) += indirect_pci.o pci_auto.o todc_time.o
64obj-$(CONFIG_PAL4) += cpc700_pic.o 61obj-$(CONFIG_PAL4) += cpc700_pic.o
65obj-$(CONFIG_POWERPMC250) += pci_auto.o 62obj-$(CONFIG_POWERPMC250) += pci_auto.o
66obj-$(CONFIG_PPLUS) += hawk_common.o open_pic.o i8259.o \ 63obj-$(CONFIG_PPLUS) += hawk_common.o open_pic.o \
67 indirect_pci.o todc_time.o pci_auto.o 64 todc_time.o pci_auto.o
68obj-$(CONFIG_PRPMC750) += open_pic.o indirect_pci.o pci_auto.o \ 65obj-$(CONFIG_PRPMC750) += open_pic.o pci_auto.o \
69 hawk_common.o 66 hawk_common.o
70obj-$(CONFIG_HARRIER) += harrier.o 67obj-$(CONFIG_HARRIER) += harrier.o
71obj-$(CONFIG_PRPMC800) += open_pic.o indirect_pci.o pci_auto.o 68obj-$(CONFIG_PRPMC800) += open_pic.o pci_auto.o
72obj-$(CONFIG_RADSTONE_PPC7D) += i8259.o pci_auto.o 69obj-$(CONFIG_RADSTONE_PPC7D) += pci_auto.o
73obj-$(CONFIG_SANDPOINT) += i8259.o pci_auto.o todc_time.o 70obj-$(CONFIG_SANDPOINT) += pci_auto.o todc_time.o
74obj-$(CONFIG_SBC82xx) += todc_time.o 71obj-$(CONFIG_SBC82xx) += todc_time.o
75obj-$(CONFIG_SPRUCE) += cpc700_pic.o indirect_pci.o pci_auto.o \ 72obj-$(CONFIG_SPRUCE) += cpc700_pic.o pci_auto.o \
76 todc_time.o 73 todc_time.o
77obj-$(CONFIG_8260) += m8260_setup.o pq2_devices.o pq2_sys.o \ 74obj-$(CONFIG_8260) += m8260_setup.o pq2_devices.o pq2_sys.o \
78 ppc_sys.o 75 ppc_sys.o
79obj-$(CONFIG_PCI_8260) += m82xx_pci.o indirect_pci.o pci_auto.o 76obj-$(CONFIG_PCI_8260) += m82xx_pci.o pci_auto.o
80obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o 77obj-$(CONFIG_8260_PCI9) += m8260_pci_erratum9.o
81obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o 78obj-$(CONFIG_CPM2) += cpm2_common.o cpm2_pic.o
82ifeq ($(CONFIG_PPC_GEN550),y) 79ifeq ($(CONFIG_PPC_GEN550),y)
@@ -87,20 +84,18 @@ ifeq ($(CONFIG_SERIAL_MPSC_CONSOLE),y)
87obj-$(CONFIG_SERIAL_TEXT_DEBUG) += mv64x60_dbg.o 84obj-$(CONFIG_SERIAL_TEXT_DEBUG) += mv64x60_dbg.o
88endif 85endif
89obj-$(CONFIG_BOOTX_TEXT) += btext.o 86obj-$(CONFIG_BOOTX_TEXT) += btext.o
90obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o indirect_pci.o ppc_sys.o 87obj-$(CONFIG_MPC10X_BRIDGE) += mpc10x_common.o ppc_sys.o
91obj-$(CONFIG_MPC10X_OPENPIC) += open_pic.o 88obj-$(CONFIG_MPC10X_OPENPIC) += open_pic.o
92obj-$(CONFIG_40x) += dcr.o
93obj-$(CONFIG_BOOKE) += dcr.o
94obj-$(CONFIG_85xx) += open_pic.o ppc85xx_common.o ppc85xx_setup.o \ 89obj-$(CONFIG_85xx) += open_pic.o ppc85xx_common.o ppc85xx_setup.o \
95 ppc_sys.o i8259.o mpc85xx_sys.o \ 90 ppc_sys.o mpc85xx_sys.o \
96 mpc85xx_devices.o 91 mpc85xx_devices.o
97ifeq ($(CONFIG_85xx),y) 92ifeq ($(CONFIG_85xx),y)
98obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o 93obj-$(CONFIG_PCI) += pci_auto.o
99endif 94endif
100obj-$(CONFIG_83xx) += ipic.o ppc83xx_setup.o ppc_sys.o \ 95obj-$(CONFIG_83xx) += ipic.o ppc83xx_setup.o ppc_sys.o \
101 mpc83xx_sys.o mpc83xx_devices.o 96 mpc83xx_sys.o mpc83xx_devices.o
102ifeq ($(CONFIG_83xx),y) 97ifeq ($(CONFIG_83xx),y)
103obj-$(CONFIG_PCI) += indirect_pci.o pci_auto.o 98obj-$(CONFIG_PCI) += pci_auto.o
104endif 99endif
105obj-$(CONFIG_MPC8548_CDS) += todc_time.o 100obj-$(CONFIG_MPC8548_CDS) += todc_time.o
106obj-$(CONFIG_MPC8555_CDS) += todc_time.o 101obj-$(CONFIG_MPC8555_CDS) += todc_time.o
diff --git a/arch/ppc/syslib/btext.c b/arch/ppc/syslib/btext.c
index 7734f6836174..12fa83e6774a 100644
--- a/arch/ppc/syslib/btext.c
+++ b/arch/ppc/syslib/btext.c
@@ -53,8 +53,8 @@ extern char *klimit;
53 * chrp only uses it during early boot. 53 * chrp only uses it during early boot.
54 */ 54 */
55#ifdef CONFIG_XMON 55#ifdef CONFIG_XMON
56#define BTEXT __pmac 56#define BTEXT
57#define BTDATA __pmacdata 57#define BTDATA
58#else 58#else
59#define BTEXT __init 59#define BTEXT __init
60#define BTDATA __initdata 60#define BTDATA __initdata
@@ -187,7 +187,7 @@ btext_setup_display(int width, int height, int depth, int pitch,
187 * changes. 187 * changes.
188 */ 188 */
189 189
190void __openfirmware 190void
191map_boot_text(void) 191map_boot_text(void)
192{ 192{
193 unsigned long base, offset, size; 193 unsigned long base, offset, size;
diff --git a/arch/ppc/syslib/gt64260_pic.c b/arch/ppc/syslib/gt64260_pic.c
index 44aa87385451..f97b3a9abd1e 100644
--- a/arch/ppc/syslib/gt64260_pic.c
+++ b/arch/ppc/syslib/gt64260_pic.c
@@ -45,6 +45,7 @@
45#include <asm/system.h> 45#include <asm/system.h>
46#include <asm/irq.h> 46#include <asm/irq.h>
47#include <asm/mv64x60.h> 47#include <asm/mv64x60.h>
48#include <asm/machdep.h>
48 49
49#define CPU_INTR_STR "gt64260 cpu interface error" 50#define CPU_INTR_STR "gt64260 cpu interface error"
50#define PCI0_INTR_STR "gt64260 pci 0 error" 51#define PCI0_INTR_STR "gt64260 pci 0 error"
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
index 0bb919859b8b..c36db279b43d 100644
--- a/arch/ppc/syslib/ibm440gx_common.c
+++ b/arch/ppc/syslib/ibm440gx_common.c
@@ -236,9 +236,9 @@ void __init ibm440gx_l2c_setup(struct ibm44x_clocks* p)
236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C, 236 /* Disable L2C on rev.A, rev.B and 800MHz version of rev.C,
237 enable it on all other revisions 237 enable it on all other revisions
238 */ 238 */
239 if (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. A") == 0 || 239 if (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. A") == 0 ||
240 strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. B") == 0 240 strcmp(cur_cpu_spec->cpu_name, "440GX Rev. B") == 0
241 || (strcmp(cur_cpu_spec[0]->cpu_name, "440GX Rev. C") 241 || (strcmp(cur_cpu_spec->cpu_name, "440GX Rev. C")
242 == 0 && p->cpu > 667000000)) 242 == 0 && p->cpu > 667000000))
243 ibm440gx_l2c_disable(); 243 ibm440gx_l2c_disable();
244 else 244 else
diff --git a/arch/ppc/syslib/ibm44x_common.c b/arch/ppc/syslib/ibm44x_common.c
index 7612e0623f99..5152c8e41340 100644
--- a/arch/ppc/syslib/ibm44x_common.c
+++ b/arch/ppc/syslib/ibm44x_common.c
@@ -27,9 +27,14 @@
27#include <asm/time.h> 27#include <asm/time.h>
28#include <asm/ppc4xx_pic.h> 28#include <asm/ppc4xx_pic.h>
29#include <asm/param.h> 29#include <asm/param.h>
30#include <asm/bootinfo.h>
31#include <asm/ppcboot.h>
30 32
31#include <syslib/gen550.h> 33#include <syslib/gen550.h>
32 34
35/* Global Variables */
36bd_t __res;
37
33phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size) 38phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
34{ 39{
35 phys_addr_t page_4gb = 0; 40 phys_addr_t page_4gb = 0;
@@ -150,8 +155,36 @@ static unsigned long __init ibm44x_find_end_of_memory(void)
150 return mem_size; 155 return mem_size;
151} 156}
152 157
153void __init ibm44x_platform_init(void) 158void __init ibm44x_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
159 unsigned long r6, unsigned long r7)
154{ 160{
161 parse_bootinfo(find_bootinfo());
162
163 /*
164 * If we were passed in a board information, copy it into the
165 * residual data area.
166 */
167 if (r3)
168 __res = *(bd_t *)(r3 + KERNELBASE);
169
170#if defined(CONFIG_BLK_DEV_INITRD)
171 /*
172 * If the init RAM disk has been configured in, and there's a valid
173 * starting address for it, set it up.
174 */
175 if (r4) {
176 initrd_start = r4 + KERNELBASE;
177 initrd_end = r5 + KERNELBASE;
178 }
179#endif /* CONFIG_BLK_DEV_INITRD */
180
181 /* Copy the kernel command line arguments to a safe place. */
182
183 if (r6) {
184 *(char *) (r7 + KERNELBASE) = 0;
185 strcpy(cmd_line, (char *) (r6 + KERNELBASE));
186 }
187
155 ppc_md.init_IRQ = ppc4xx_pic_init; 188 ppc_md.init_IRQ = ppc4xx_pic_init;
156 ppc_md.find_end_of_memory = ibm44x_find_end_of_memory; 189 ppc_md.find_end_of_memory = ibm44x_find_end_of_memory;
157 ppc_md.restart = ibm44x_restart; 190 ppc_md.restart = ibm44x_restart;
@@ -178,7 +211,7 @@ void __init ibm44x_platform_init(void)
178#endif 211#endif
179} 212}
180 213
181/* Called from MachineCheckException */ 214/* Called from machine_check_exception */
182void platform_machine_check(struct pt_regs *regs) 215void platform_machine_check(struct pt_regs *regs)
183{ 216{
184 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n", 217 printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n",
diff --git a/arch/ppc/syslib/ibm44x_common.h b/arch/ppc/syslib/ibm44x_common.h
index c16b6a5ac6ab..b25a8995e4e9 100644
--- a/arch/ppc/syslib/ibm44x_common.h
+++ b/arch/ppc/syslib/ibm44x_common.h
@@ -36,7 +36,8 @@ struct ibm44x_clocks {
36}; 36};
37 37
38/* common 44x platform init */ 38/* common 44x platform init */
39void ibm44x_platform_init(void) __init; 39void ibm44x_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
40 unsigned long r6, unsigned long r7) __init;
40 41
41/* initialize decrementer and tick-related variables */ 42/* initialize decrementer and tick-related variables */
42void ibm44x_calibrate_decr(unsigned int freq) __init; 43void ibm44x_calibrate_decr(unsigned int freq) __init;
diff --git a/arch/ppc/syslib/m8260_setup.c b/arch/ppc/syslib/m8260_setup.c
index 8f80a42dfdb7..76a2aa4ce65e 100644
--- a/arch/ppc/syslib/m8260_setup.c
+++ b/arch/ppc/syslib/m8260_setup.c
@@ -62,6 +62,10 @@ m8260_setup_arch(void)
62 if (initrd_start) 62 if (initrd_start)
63 ROOT_DEV = Root_RAM0; 63 ROOT_DEV = Root_RAM0;
64#endif 64#endif
65
66 identify_ppc_sys_by_name_and_id(BOARD_CHIP_NAME,
67 in_be32(CPM_MAP_ADDR + CPM_IMMR_OFFSET));
68
65 m82xx_board_setup(); 69 m82xx_board_setup();
66} 70}
67 71
diff --git a/arch/ppc/syslib/m82xx_pci.c b/arch/ppc/syslib/m82xx_pci.c
index 9db58c587b46..1d1c3956c1ae 100644
--- a/arch/ppc/syslib/m82xx_pci.c
+++ b/arch/ppc/syslib/m82xx_pci.c
@@ -302,11 +302,11 @@ pq2ads_setup_pci(struct pci_controller *hose)
302 302
303void __init pq2_find_bridges(void) 303void __init pq2_find_bridges(void)
304{ 304{
305 extern int pci_assign_all_busses; 305 extern int pci_assign_all_buses;
306 struct pci_controller * hose; 306 struct pci_controller * hose;
307 int host_bridge; 307 int host_bridge;
308 308
309 pci_assign_all_busses = 1; 309 pci_assign_all_buses = 1;
310 310
311 hose = pcibios_alloc_controller(); 311 hose = pcibios_alloc_controller();
312 312
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index 4c888da89b3c..97ffbc70574f 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -144,12 +144,12 @@ void __init m8xx_calibrate_decr(void)
144 int freq, fp, divisor; 144 int freq, fp, divisor;
145 145
146 /* Unlock the SCCR. */ 146 /* Unlock the SCCR. */
147 ((volatile immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk = ~KAPWR_KEY; 147 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk, ~KAPWR_KEY);
148 ((volatile immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk = KAPWR_KEY; 148 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrstk.cark_sccrk, KAPWR_KEY);
149 149
150 /* Force all 8xx processors to use divide by 16 processor clock. */ 150 /* Force all 8xx processors to use divide by 16 processor clock. */
151 ((volatile immap_t *)IMAP_ADDR)->im_clkrst.car_sccr |= 0x02000000; 151 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_sccr,
152 152 in_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_sccr)|0x02000000);
153 /* Processor frequency is MHz. 153 /* Processor frequency is MHz.
154 * The value 'fp' is the number of decrementer ticks per second. 154 * The value 'fp' is the number of decrementer ticks per second.
155 */ 155 */
@@ -175,28 +175,24 @@ void __init m8xx_calibrate_decr(void)
175 * we guarantee the registers are locked, then we unlock them 175 * we guarantee the registers are locked, then we unlock them
176 * for our use. 176 * for our use.
177 */ 177 */
178 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk = ~KAPWR_KEY; 178 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk, ~KAPWR_KEY);
179 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck = ~KAPWR_KEY; 179 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck, ~KAPWR_KEY);
180 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk = ~KAPWR_KEY; 180 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk, ~KAPWR_KEY);
181 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk = KAPWR_KEY; 181 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbscrk, KAPWR_KEY);
182 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck = KAPWR_KEY; 182 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtcsck, KAPWR_KEY);
183 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk = KAPWR_KEY; 183 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_tbk, KAPWR_KEY);
184 184
185 /* Disable the RTC one second and alarm interrupts. */ 185 /* Disable the RTC one second and alarm interrupts. */
186 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc &= 186 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) & ~(RTCSC_SIE | RTCSC_ALE));
187 ~(RTCSC_SIE | RTCSC_ALE);
188 /* Enable the RTC */ 187 /* Enable the RTC */
189 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc |= 188 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc, in_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtcsc) | (RTCSC_RTF | RTCSC_RTE));
190 (RTCSC_RTF | RTCSC_RTE);
191 189
192 /* Enabling the decrementer also enables the timebase interrupts 190 /* Enabling the decrementer also enables the timebase interrupts
193 * (or from the other point of view, to get decrementer interrupts 191 * (or from the other point of view, to get decrementer interrupts
194 * we have to enable the timebase). The decrementer interrupt 192 * we have to enable the timebase). The decrementer interrupt
195 * is wired into the vector table, nothing to do here for that. 193 * is wired into the vector table, nothing to do here for that.
196 */ 194 */
197 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_tbscr = 195 out_be16(&((immap_t *)IMAP_ADDR)->im_sit.sit_tbscr, (mk_int_int_mask(DEC_INTERRUPT) << 8) | (TBSCR_TBF | TBSCR_TBE));
198 ((mk_int_int_mask(DEC_INTERRUPT) << 8) |
199 (TBSCR_TBF | TBSCR_TBE));
200 196
201 if (setup_irq(DEC_INTERRUPT, &tbint_irqaction)) 197 if (setup_irq(DEC_INTERRUPT, &tbint_irqaction))
202 panic("Could not allocate timer IRQ!"); 198 panic("Could not allocate timer IRQ!");
@@ -216,9 +212,9 @@ void __init m8xx_calibrate_decr(void)
216static int 212static int
217m8xx_set_rtc_time(unsigned long time) 213m8xx_set_rtc_time(unsigned long time)
218{ 214{
219 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck = KAPWR_KEY; 215 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck, KAPWR_KEY);
220 ((volatile immap_t *)IMAP_ADDR)->im_sit.sit_rtc = time; 216 out_be32(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtc, time);
221 ((volatile immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck = ~KAPWR_KEY; 217 out_be32(&((immap_t *)IMAP_ADDR)->im_sitk.sitk_rtck, ~KAPWR_KEY);
222 return(0); 218 return(0);
223} 219}
224 220
@@ -226,7 +222,7 @@ static unsigned long
226m8xx_get_rtc_time(void) 222m8xx_get_rtc_time(void)
227{ 223{
228 /* Get time from the RTC. */ 224 /* Get time from the RTC. */
229 return((unsigned long)(((immap_t *)IMAP_ADDR)->im_sit.sit_rtc)); 225 return (unsigned long) in_be32(&((immap_t *)IMAP_ADDR)->im_sit.sit_rtc);
230} 226}
231 227
232static void 228static void
@@ -235,13 +231,13 @@ m8xx_restart(char *cmd)
235 __volatile__ unsigned char dummy; 231 __volatile__ unsigned char dummy;
236 232
237 local_irq_disable(); 233 local_irq_disable();
238 ((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr |= 0x00000080; 234 out_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr, in_be32(&((immap_t *)IMAP_ADDR)->im_clkrst.car_plprcr) | 0x00000080);
239 235
240 /* Clear the ME bit in MSR to cause checkstop on machine check 236 /* Clear the ME bit in MSR to cause checkstop on machine check
241 */ 237 */
242 mtmsr(mfmsr() & ~0x1000); 238 mtmsr(mfmsr() & ~0x1000);
243 239
244 dummy = ((immap_t *)IMAP_ADDR)->im_clkrst.res[0]; 240 dummy = in_8(&((immap_t *)IMAP_ADDR)->im_clkrst.res[0]);
245 printk("Restart failed\n"); 241 printk("Restart failed\n");
246 while(1); 242 while(1);
247} 243}
@@ -306,8 +302,7 @@ m8xx_init_IRQ(void)
306 i8259_init(0); 302 i8259_init(0);
307 303
308 /* The i8259 cascade interrupt must be level sensitive. */ 304 /* The i8259 cascade interrupt must be level sensitive. */
309 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel &= 305 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel, in_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel & ~(0x80000000 >> ISA_BRIDGE_INT)));
310 ~(0x80000000 >> ISA_BRIDGE_INT);
311 306
312 if (setup_irq(ISA_BRIDGE_INT, &mbx_i8259_irqaction)) 307 if (setup_irq(ISA_BRIDGE_INT, &mbx_i8259_irqaction))
313 enable_irq(ISA_BRIDGE_INT); 308 enable_irq(ISA_BRIDGE_INT);
@@ -404,9 +399,10 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
404 strcpy(cmd_line, (char *)(r6+KERNELBASE)); 399 strcpy(cmd_line, (char *)(r6+KERNELBASE));
405 } 400 }
406 401
402 identify_ppc_sys_by_name(BOARD_CHIP_NAME);
403
407 ppc_md.setup_arch = m8xx_setup_arch; 404 ppc_md.setup_arch = m8xx_setup_arch;
408 ppc_md.show_percpuinfo = m8xx_show_percpuinfo; 405 ppc_md.show_percpuinfo = m8xx_show_percpuinfo;
409 ppc_md.irq_canonicalize = NULL;
410 ppc_md.init_IRQ = m8xx_init_IRQ; 406 ppc_md.init_IRQ = m8xx_init_IRQ;
411 ppc_md.get_irq = m8xx_get_irq; 407 ppc_md.get_irq = m8xx_get_irq;
412 ppc_md.init = NULL; 408 ppc_md.init = NULL;
diff --git a/arch/ppc/syslib/m8xx_wdt.c b/arch/ppc/syslib/m8xx_wdt.c
index 2ddc857e7fc7..c5ac5ce5d7d2 100644
--- a/arch/ppc/syslib/m8xx_wdt.c
+++ b/arch/ppc/syslib/m8xx_wdt.c
@@ -29,8 +29,8 @@ void m8xx_wdt_reset(void)
29{ 29{
30 volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR; 30 volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
31 31
32 imap->im_siu_conf.sc_swsr = 0x556c; /* write magic1 */ 32 out_be16(imap->im_siu_conf.sc_swsr, 0x556c); /* write magic1 */
33 imap->im_siu_conf.sc_swsr = 0xaa39; /* write magic2 */ 33 out_be16(imap->im_siu_conf.sc_swsr, 0xaa39); /* write magic2 */
34} 34}
35 35
36static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs) 36static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
@@ -39,7 +39,7 @@ static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
39 39
40 m8xx_wdt_reset(); 40 m8xx_wdt_reset();
41 41
42 imap->im_sit.sit_piscr |= PISCR_PS; /* clear irq */ 42 out_be16(imap->im_sit.sit_piscr, in_be16(imap->im_sit.sit_piscr | PISCR_PS)); /* clear irq */
43 43
44 return IRQ_HANDLED; 44 return IRQ_HANDLED;
45} 45}
@@ -51,7 +51,7 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
51 u32 sypcr; 51 u32 sypcr;
52 u32 pitrtclk; 52 u32 pitrtclk;
53 53
54 sypcr = imap->im_siu_conf.sc_sypcr; 54 sypcr = in_be32(imap->im_siu_conf.sc_sypcr);
55 55
56 if (!(sypcr & 0x04)) { 56 if (!(sypcr & 0x04)) {
57 printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n", 57 printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n",
@@ -87,9 +87,9 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
87 else 87 else
88 pitc = pitrtclk * wdt_timeout / binfo->bi_intfreq / 2; 88 pitc = pitrtclk * wdt_timeout / binfo->bi_intfreq / 2;
89 89
90 imap->im_sit.sit_pitc = pitc << 16; 90 out_be32(imap->im_sit.sit_pitc, pitc << 16);
91 imap->im_sit.sit_piscr = 91
92 (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE; 92 out_be16(imap->im_sit.sit_piscr, (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE);
93 93
94 if (setup_irq(PIT_INTERRUPT, &m8xx_wdt_irqaction)) 94 if (setup_irq(PIT_INTERRUPT, &m8xx_wdt_irqaction))
95 panic("m8xx_wdt: error setting up the watchdog irq!"); 95 panic("m8xx_wdt: error setting up the watchdog irq!");
diff --git a/arch/ppc/syslib/mpc52xx_pci.c b/arch/ppc/syslib/mpc52xx_pci.c
index 59cf3e8bd1a0..4ac19080eb85 100644
--- a/arch/ppc/syslib/mpc52xx_pci.c
+++ b/arch/ppc/syslib/mpc52xx_pci.c
@@ -21,6 +21,7 @@
21#include "mpc52xx_pci.h" 21#include "mpc52xx_pci.h"
22 22
23#include <asm/delay.h> 23#include <asm/delay.h>
24#include <asm/machdep.h>
24 25
25 26
26static int 27static int
@@ -181,7 +182,7 @@ mpc52xx_find_bridges(void)
181 struct mpc52xx_pci __iomem *pci_regs; 182 struct mpc52xx_pci __iomem *pci_regs;
182 struct pci_controller *hose; 183 struct pci_controller *hose;
183 184
184 pci_assign_all_busses = 1; 185 pci_assign_all_buses = 1;
185 186
186 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE); 187 pci_regs = ioremap(MPC52xx_PA(MPC52xx_PCI_OFFSET), MPC52xx_PCI_SIZE);
187 if (!pci_regs) 188 if (!pci_regs)
diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
index 95b3b8a7f0ba..dbf8acac507f 100644
--- a/arch/ppc/syslib/mpc83xx_devices.c
+++ b/arch/ppc/syslib/mpc83xx_devices.c
@@ -21,6 +21,7 @@
21#include <asm/mpc83xx.h> 21#include <asm/mpc83xx.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/ppc_sys.h> 23#include <asm/ppc_sys.h>
24#include <asm/machdep.h>
24 25
25/* We use offsets for IORESOURCE_MEM since we do not know at compile time 26/* We use offsets for IORESOURCE_MEM since we do not know at compile time
26 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup 27 * what IMMRBAR is, will get fixed up by mach_mpc83xx_fixup
diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
index bbc5ac0de878..2ede677a0a53 100644
--- a/arch/ppc/syslib/mpc85xx_devices.c
+++ b/arch/ppc/syslib/mpc85xx_devices.c
@@ -25,19 +25,20 @@
25/* We use offsets for IORESOURCE_MEM since we do not know at compile time 25/* We use offsets for IORESOURCE_MEM since we do not know at compile time
26 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup 26 * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
27 */ 27 */
28struct gianfar_mdio_data mpc85xx_mdio_pdata = {
29 .paddr = MPC85xx_MIIM_OFFSET,
30};
28 31
29static struct gianfar_platform_data mpc85xx_tsec1_pdata = { 32static struct gianfar_platform_data mpc85xx_tsec1_pdata = {
30 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 33 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
31 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | 34 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
32 FSL_GIANFAR_DEV_HAS_MULTI_INTR, 35 FSL_GIANFAR_DEV_HAS_MULTI_INTR,
33 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
34}; 36};
35 37
36static struct gianfar_platform_data mpc85xx_tsec2_pdata = { 38static struct gianfar_platform_data mpc85xx_tsec2_pdata = {
37 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 39 .device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
38 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON | 40 FSL_GIANFAR_DEV_HAS_COALESCE | FSL_GIANFAR_DEV_HAS_RMON |
39 FSL_GIANFAR_DEV_HAS_MULTI_INTR, 41 FSL_GIANFAR_DEV_HAS_MULTI_INTR,
40 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
41}; 42};
42 43
43static struct gianfar_platform_data mpc85xx_etsec1_pdata = { 44static struct gianfar_platform_data mpc85xx_etsec1_pdata = {
@@ -46,7 +47,6 @@ static struct gianfar_platform_data mpc85xx_etsec1_pdata = {
46 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 47 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
47 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 48 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
48 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 49 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
49 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
50}; 50};
51 51
52static struct gianfar_platform_data mpc85xx_etsec2_pdata = { 52static struct gianfar_platform_data mpc85xx_etsec2_pdata = {
@@ -55,7 +55,6 @@ static struct gianfar_platform_data mpc85xx_etsec2_pdata = {
55 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 55 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
56 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 56 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
57 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 57 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
58 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
59}; 58};
60 59
61static struct gianfar_platform_data mpc85xx_etsec3_pdata = { 60static struct gianfar_platform_data mpc85xx_etsec3_pdata = {
@@ -64,7 +63,6 @@ static struct gianfar_platform_data mpc85xx_etsec3_pdata = {
64 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 63 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
65 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 64 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
66 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 65 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
67 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
68}; 66};
69 67
70static struct gianfar_platform_data mpc85xx_etsec4_pdata = { 68static struct gianfar_platform_data mpc85xx_etsec4_pdata = {
@@ -73,11 +71,10 @@ static struct gianfar_platform_data mpc85xx_etsec4_pdata = {
73 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 71 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
74 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN | 72 FSL_GIANFAR_DEV_HAS_CSUM | FSL_GIANFAR_DEV_HAS_VLAN |
75 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH, 73 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH,
76 .phy_reg_addr = MPC85xx_ENET1_OFFSET,
77}; 74};
78 75
79static struct gianfar_platform_data mpc85xx_fec_pdata = { 76static struct gianfar_platform_data mpc85xx_fec_pdata = {
80 .phy_reg_addr = MPC85xx_ENET1_OFFSET, 77 .device_flags = 0,
81}; 78};
82 79
83static struct fsl_i2c_platform_data mpc85xx_fsl_i2c_pdata = { 80static struct fsl_i2c_platform_data mpc85xx_fsl_i2c_pdata = {
@@ -719,6 +716,12 @@ struct platform_device ppc_sys_platform_devices[] = {
719 }, 716 },
720 }, 717 },
721 }, 718 },
719 [MPC85xx_MDIO] = {
720 .name = "fsl-gianfar_mdio",
721 .id = 0,
722 .dev.platform_data = &mpc85xx_mdio_pdata,
723 .num_resources = 0,
724 },
722}; 725};
723 726
724static int __init mach_mpc85xx_fixup(struct platform_device *pdev) 727static int __init mach_mpc85xx_fixup(struct platform_device *pdev)
diff --git a/arch/ppc/syslib/mpc85xx_sys.c b/arch/ppc/syslib/mpc85xx_sys.c
index 6e3184ab354f..cb68d8c58348 100644
--- a/arch/ppc/syslib/mpc85xx_sys.c
+++ b/arch/ppc/syslib/mpc85xx_sys.c
@@ -24,19 +24,19 @@ struct ppc_sys_spec ppc_sys_specs[] = {
24 .ppc_sys_name = "8540", 24 .ppc_sys_name = "8540",
25 .mask = 0xFFFF0000, 25 .mask = 0xFFFF0000,
26 .value = 0x80300000, 26 .value = 0x80300000,
27 .num_devices = 10, 27 .num_devices = 11,
28 .device_list = (enum ppc_sys_devices[]) 28 .device_list = (enum ppc_sys_devices[])
29 { 29 {
30 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_FEC, MPC85xx_IIC1, 30 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_FEC, MPC85xx_IIC1,
31 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 31 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
32 MPC85xx_PERFMON, MPC85xx_DUART, 32 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_MDIO,
33 }, 33 },
34 }, 34 },
35 { 35 {
36 .ppc_sys_name = "8560", 36 .ppc_sys_name = "8560",
37 .mask = 0xFFFF0000, 37 .mask = 0xFFFF0000,
38 .value = 0x80700000, 38 .value = 0x80700000,
39 .num_devices = 19, 39 .num_devices = 20,
40 .device_list = (enum ppc_sys_devices[]) 40 .device_list = (enum ppc_sys_devices[])
41 { 41 {
42 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 42 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -45,14 +45,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
45 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1, 45 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, MPC85xx_CPM_SCC1,
46 MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3, MPC85xx_CPM_SCC4, 46 MPC85xx_CPM_SCC2, MPC85xx_CPM_SCC3, MPC85xx_CPM_SCC4,
47 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, MPC85xx_CPM_FCC3, 47 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, MPC85xx_CPM_FCC3,
48 MPC85xx_CPM_MCC1, MPC85xx_CPM_MCC2, 48 MPC85xx_CPM_MCC1, MPC85xx_CPM_MCC2, MPC85xx_MDIO,
49 }, 49 },
50 }, 50 },
51 { 51 {
52 .ppc_sys_name = "8541", 52 .ppc_sys_name = "8541",
53 .mask = 0xFFFF0000, 53 .mask = 0xFFFF0000,
54 .value = 0x80720000, 54 .value = 0x80720000,
55 .num_devices = 13, 55 .num_devices = 14,
56 .device_list = (enum ppc_sys_devices[]) 56 .device_list = (enum ppc_sys_devices[])
57 { 57 {
58 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 58 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -60,13 +60,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
60 MPC85xx_PERFMON, MPC85xx_DUART, 60 MPC85xx_PERFMON, MPC85xx_DUART,
61 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, 61 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
62 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 62 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
63 MPC85xx_MDIO,
63 }, 64 },
64 }, 65 },
65 { 66 {
66 .ppc_sys_name = "8541E", 67 .ppc_sys_name = "8541E",
67 .mask = 0xFFFF0000, 68 .mask = 0xFFFF0000,
68 .value = 0x807A0000, 69 .value = 0x807A0000,
69 .num_devices = 14, 70 .num_devices = 15,
70 .device_list = (enum ppc_sys_devices[]) 71 .device_list = (enum ppc_sys_devices[])
71 { 72 {
72 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 73 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -74,13 +75,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
74 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 75 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
75 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C, 76 MPC85xx_CPM_SPI, MPC85xx_CPM_I2C,
76 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 77 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
78 MPC85xx_MDIO,
77 }, 79 },
78 }, 80 },
79 { 81 {
80 .ppc_sys_name = "8555", 82 .ppc_sys_name = "8555",
81 .mask = 0xFFFF0000, 83 .mask = 0xFFFF0000,
82 .value = 0x80710000, 84 .value = 0x80710000,
83 .num_devices = 19, 85 .num_devices = 20,
84 .device_list = (enum ppc_sys_devices[]) 86 .device_list = (enum ppc_sys_devices[])
85 { 87 {
86 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 88 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -91,13 +93,14 @@ struct ppc_sys_spec ppc_sys_specs[] = {
91 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 93 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
92 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2, 94 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
93 MPC85xx_CPM_USB, 95 MPC85xx_CPM_USB,
96 MPC85xx_MDIO,
94 }, 97 },
95 }, 98 },
96 { 99 {
97 .ppc_sys_name = "8555E", 100 .ppc_sys_name = "8555E",
98 .mask = 0xFFFF0000, 101 .mask = 0xFFFF0000,
99 .value = 0x80790000, 102 .value = 0x80790000,
100 .num_devices = 20, 103 .num_devices = 21,
101 .device_list = (enum ppc_sys_devices[]) 104 .device_list = (enum ppc_sys_devices[])
102 { 105 {
103 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1, 106 MPC85xx_TSEC1, MPC85xx_TSEC2, MPC85xx_IIC1,
@@ -108,6 +111,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
108 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2, 111 MPC85xx_CPM_FCC1, MPC85xx_CPM_FCC2,
109 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2, 112 MPC85xx_CPM_SMC1, MPC85xx_CPM_SMC2,
110 MPC85xx_CPM_USB, 113 MPC85xx_CPM_USB,
114 MPC85xx_MDIO,
111 }, 115 },
112 }, 116 },
113 /* SVRs on 8548 rev1.0 matches for 8548/8547/8545 */ 117 /* SVRs on 8548 rev1.0 matches for 8548/8547/8545 */
@@ -115,104 +119,112 @@ struct ppc_sys_spec ppc_sys_specs[] = {
115 .ppc_sys_name = "8548E", 119 .ppc_sys_name = "8548E",
116 .mask = 0xFFFF00F0, 120 .mask = 0xFFFF00F0,
117 .value = 0x80390010, 121 .value = 0x80390010,
118 .num_devices = 13, 122 .num_devices = 14,
119 .device_list = (enum ppc_sys_devices[]) 123 .device_list = (enum ppc_sys_devices[])
120 { 124 {
121 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 125 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
122 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 126 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
123 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 127 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
124 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 128 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
129 MPC85xx_MDIO,
125 }, 130 },
126 }, 131 },
127 { 132 {
128 .ppc_sys_name = "8548", 133 .ppc_sys_name = "8548",
129 .mask = 0xFFFF00F0, 134 .mask = 0xFFFF00F0,
130 .value = 0x80310010, 135 .value = 0x80310010,
131 .num_devices = 12, 136 .num_devices = 13,
132 .device_list = (enum ppc_sys_devices[]) 137 .device_list = (enum ppc_sys_devices[])
133 { 138 {
134 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 139 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
135 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 140 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
136 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 141 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
137 MPC85xx_PERFMON, MPC85xx_DUART, 142 MPC85xx_PERFMON, MPC85xx_DUART,
143 MPC85xx_MDIO,
138 }, 144 },
139 }, 145 },
140 { 146 {
141 .ppc_sys_name = "8547E", 147 .ppc_sys_name = "8547E",
142 .mask = 0xFFFF00F0, 148 .mask = 0xFFFF00F0,
143 .value = 0x80390010, 149 .value = 0x80390010,
144 .num_devices = 13, 150 .num_devices = 14,
145 .device_list = (enum ppc_sys_devices[]) 151 .device_list = (enum ppc_sys_devices[])
146 { 152 {
147 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 153 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
148 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 154 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
149 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 155 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
150 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 156 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
157 MPC85xx_MDIO,
151 }, 158 },
152 }, 159 },
153 { 160 {
154 .ppc_sys_name = "8547", 161 .ppc_sys_name = "8547",
155 .mask = 0xFFFF00F0, 162 .mask = 0xFFFF00F0,
156 .value = 0x80310010, 163 .value = 0x80310010,
157 .num_devices = 12, 164 .num_devices = 13,
158 .device_list = (enum ppc_sys_devices[]) 165 .device_list = (enum ppc_sys_devices[])
159 { 166 {
160 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3, 167 MPC85xx_eTSEC1, MPC85xx_eTSEC2, MPC85xx_eTSEC3,
161 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2, 168 MPC85xx_eTSEC4, MPC85xx_IIC1, MPC85xx_IIC2,
162 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 169 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
163 MPC85xx_PERFMON, MPC85xx_DUART, 170 MPC85xx_PERFMON, MPC85xx_DUART,
171 MPC85xx_MDIO,
164 }, 172 },
165 }, 173 },
166 { 174 {
167 .ppc_sys_name = "8545E", 175 .ppc_sys_name = "8545E",
168 .mask = 0xFFFF00F0, 176 .mask = 0xFFFF00F0,
169 .value = 0x80390010, 177 .value = 0x80390010,
170 .num_devices = 11, 178 .num_devices = 12,
171 .device_list = (enum ppc_sys_devices[]) 179 .device_list = (enum ppc_sys_devices[])
172 { 180 {
173 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 181 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
174 MPC85xx_IIC1, MPC85xx_IIC2, 182 MPC85xx_IIC1, MPC85xx_IIC2,
175 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 183 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
176 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 184 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
185 MPC85xx_MDIO,
177 }, 186 },
178 }, 187 },
179 { 188 {
180 .ppc_sys_name = "8545", 189 .ppc_sys_name = "8545",
181 .mask = 0xFFFF00F0, 190 .mask = 0xFFFF00F0,
182 .value = 0x80310010, 191 .value = 0x80310010,
183 .num_devices = 10, 192 .num_devices = 11,
184 .device_list = (enum ppc_sys_devices[]) 193 .device_list = (enum ppc_sys_devices[])
185 { 194 {
186 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 195 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
187 MPC85xx_IIC1, MPC85xx_IIC2, 196 MPC85xx_IIC1, MPC85xx_IIC2,
188 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 197 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
189 MPC85xx_PERFMON, MPC85xx_DUART, 198 MPC85xx_PERFMON, MPC85xx_DUART,
199 MPC85xx_MDIO,
190 }, 200 },
191 }, 201 },
192 { 202 {
193 .ppc_sys_name = "8543E", 203 .ppc_sys_name = "8543E",
194 .mask = 0xFFFF00F0, 204 .mask = 0xFFFF00F0,
195 .value = 0x803A0010, 205 .value = 0x803A0010,
196 .num_devices = 11, 206 .num_devices = 12,
197 .device_list = (enum ppc_sys_devices[]) 207 .device_list = (enum ppc_sys_devices[])
198 { 208 {
199 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 209 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
200 MPC85xx_IIC1, MPC85xx_IIC2, 210 MPC85xx_IIC1, MPC85xx_IIC2,
201 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 211 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
202 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2, 212 MPC85xx_PERFMON, MPC85xx_DUART, MPC85xx_SEC2,
213 MPC85xx_MDIO,
203 }, 214 },
204 }, 215 },
205 { 216 {
206 .ppc_sys_name = "8543", 217 .ppc_sys_name = "8543",
207 .mask = 0xFFFF00F0, 218 .mask = 0xFFFF00F0,
208 .value = 0x80320010, 219 .value = 0x80320010,
209 .num_devices = 10, 220 .num_devices = 11,
210 .device_list = (enum ppc_sys_devices[]) 221 .device_list = (enum ppc_sys_devices[])
211 { 222 {
212 MPC85xx_eTSEC1, MPC85xx_eTSEC2, 223 MPC85xx_eTSEC1, MPC85xx_eTSEC2,
213 MPC85xx_IIC1, MPC85xx_IIC2, 224 MPC85xx_IIC1, MPC85xx_IIC2,
214 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3, 225 MPC85xx_DMA0, MPC85xx_DMA1, MPC85xx_DMA2, MPC85xx_DMA3,
215 MPC85xx_PERFMON, MPC85xx_DUART, 226 MPC85xx_PERFMON, MPC85xx_DUART,
227 MPC85xx_MDIO,
216 }, 228 },
217 }, 229 },
218 { /* default match */ 230 { /* default match */
diff --git a/arch/ppc/syslib/mpc8xx_sys.c b/arch/ppc/syslib/mpc8xx_sys.c
index a532ccc861c0..3cc27d29e3af 100644
--- a/arch/ppc/syslib/mpc8xx_sys.c
+++ b/arch/ppc/syslib/mpc8xx_sys.c
@@ -24,7 +24,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
24 .ppc_sys_name = "MPC86X", 24 .ppc_sys_name = "MPC86X",
25 .mask = 0xFFFFFFFF, 25 .mask = 0xFFFFFFFF,
26 .value = 0x00000000, 26 .value = 0x00000000,
27 .num_devices = 2, 27 .num_devices = 7,
28 .device_list = (enum ppc_sys_devices[]) 28 .device_list = (enum ppc_sys_devices[])
29 { 29 {
30 MPC8xx_CPM_FEC1, 30 MPC8xx_CPM_FEC1,
@@ -40,7 +40,7 @@ struct ppc_sys_spec ppc_sys_specs[] = {
40 .ppc_sys_name = "MPC885", 40 .ppc_sys_name = "MPC885",
41 .mask = 0xFFFFFFFF, 41 .mask = 0xFFFFFFFF,
42 .value = 0x00000000, 42 .value = 0x00000000,
43 .num_devices = 3, 43 .num_devices = 8,
44 .device_list = (enum ppc_sys_devices[]) 44 .device_list = (enum ppc_sys_devices[])
45 { 45 {
46 MPC8xx_CPM_FEC1, 46 MPC8xx_CPM_FEC1,
diff --git a/arch/ppc/syslib/mv64360_pic.c b/arch/ppc/syslib/mv64360_pic.c
index 8356da4678a2..58b0aa813e85 100644
--- a/arch/ppc/syslib/mv64360_pic.c
+++ b/arch/ppc/syslib/mv64360_pic.c
@@ -48,6 +48,7 @@
48#include <asm/system.h> 48#include <asm/system.h>
49#include <asm/irq.h> 49#include <asm/irq.h>
50#include <asm/mv64x60.h> 50#include <asm/mv64x60.h>
51#include <asm/machdep.h>
51 52
52#ifdef CONFIG_IRQ_ALL_CPUS 53#ifdef CONFIG_IRQ_ALL_CPUS
53#error "The mv64360 does not support distribution of IRQs on all CPUs" 54#error "The mv64360 does not support distribution of IRQs on all CPUs"
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 4849850a59ed..a781c50d2f4c 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -1304,7 +1304,7 @@ mv64x60_config_pci_params(struct pci_controller *hose,
1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val); 1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1305 1305
1306 /* Set latency timer, cache line size, clear BIST */ 1306 /* Set latency timer, cache line size, clear BIST */
1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2); 1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2);
1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val); 1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1309 1309
1310 mv64x60_pci_exclude_bridge = save_exclude; 1310 mv64x60_pci_exclude_bridge = save_exclude;
diff --git a/arch/ppc/syslib/mv64x60_dbg.c b/arch/ppc/syslib/mv64x60_dbg.c
index 2927c7adf5e5..fa5b2e45e0ca 100644
--- a/arch/ppc/syslib/mv64x60_dbg.c
+++ b/arch/ppc/syslib/mv64x60_dbg.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <asm/delay.h> 25#include <asm/delay.h>
26#include <asm/mv64x60.h> 26#include <asm/mv64x60.h>
27#include <asm/machdep.h>
27 28
28 29
29#if defined(CONFIG_SERIAL_TEXT_DEBUG) 30#if defined(CONFIG_SERIAL_TEXT_DEBUG)
diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c
deleted file mode 100644
index 93c7231ea709..000000000000
--- a/arch/ppc/syslib/of_device.c
+++ /dev/null
@@ -1,276 +0,0 @@
1#include <linux/config.h>
2#include <linux/string.h>
3#include <linux/kernel.h>
4#include <linux/init.h>
5#include <linux/module.h>
6#include <linux/mod_devicetable.h>
7#include <asm/errno.h>
8#include <asm/of_device.h>
9
10/**
11 * of_match_device - Tell if an of_device structure has a matching
12 * of_match structure
13 * @ids: array of of device match structures to search in
14 * @dev: the of device structure to match against
15 *
16 * Used by a driver to check whether an of_device present in the
17 * system is in its list of supported devices.
18 */
19const struct of_device_id * of_match_device(const struct of_device_id *matches,
20 const struct of_device *dev)
21{
22 if (!dev->node)
23 return NULL;
24 while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
25 int match = 1;
26 if (matches->name[0])
27 match &= dev->node->name
28 && !strcmp(matches->name, dev->node->name);
29 if (matches->type[0])
30 match &= dev->node->type
31 && !strcmp(matches->type, dev->node->type);
32 if (matches->compatible[0])
33 match &= device_is_compatible(dev->node,
34 matches->compatible);
35 if (match)
36 return matches;
37 matches++;
38 }
39 return NULL;
40}
41
42static int of_platform_bus_match(struct device *dev, struct device_driver *drv)
43{
44 struct of_device * of_dev = to_of_device(dev);
45 struct of_platform_driver * of_drv = to_of_platform_driver(drv);
46 const struct of_device_id * matches = of_drv->match_table;
47
48 if (!matches)
49 return 0;
50
51 return of_match_device(matches, of_dev) != NULL;
52}
53
54struct of_device *of_dev_get(struct of_device *dev)
55{
56 struct device *tmp;
57
58 if (!dev)
59 return NULL;
60 tmp = get_device(&dev->dev);
61 if (tmp)
62 return to_of_device(tmp);
63 else
64 return NULL;
65}
66
67void of_dev_put(struct of_device *dev)
68{
69 if (dev)
70 put_device(&dev->dev);
71}
72
73
74static int of_device_probe(struct device *dev)
75{
76 int error = -ENODEV;
77 struct of_platform_driver *drv;
78 struct of_device *of_dev;
79 const struct of_device_id *match;
80
81 drv = to_of_platform_driver(dev->driver);
82 of_dev = to_of_device(dev);
83
84 if (!drv->probe)
85 return error;
86
87 of_dev_get(of_dev);
88
89 match = of_match_device(drv->match_table, of_dev);
90 if (match)
91 error = drv->probe(of_dev, match);
92 if (error)
93 of_dev_put(of_dev);
94
95 return error;
96}
97
98static int of_device_remove(struct device *dev)
99{
100 struct of_device * of_dev = to_of_device(dev);
101 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
102
103 if (dev->driver && drv->remove)
104 drv->remove(of_dev);
105 return 0;
106}
107
108static int of_device_suspend(struct device *dev, pm_message_t state)
109{
110 struct of_device * of_dev = to_of_device(dev);
111 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
112 int error = 0;
113
114 if (dev->driver && drv->suspend)
115 error = drv->suspend(of_dev, state);
116 return error;
117}
118
119static int of_device_resume(struct device * dev)
120{
121 struct of_device * of_dev = to_of_device(dev);
122 struct of_platform_driver * drv = to_of_platform_driver(dev->driver);
123 int error = 0;
124
125 if (dev->driver && drv->resume)
126 error = drv->resume(of_dev);
127 return error;
128}
129
130struct bus_type of_platform_bus_type = {
131 .name = "of_platform",
132 .match = of_platform_bus_match,
133 .suspend = of_device_suspend,
134 .resume = of_device_resume,
135};
136
137static int __init of_bus_driver_init(void)
138{
139 return bus_register(&of_platform_bus_type);
140}
141
142postcore_initcall(of_bus_driver_init);
143
144int of_register_driver(struct of_platform_driver *drv)
145{
146 int count = 0;
147
148 /* initialize common driver fields */
149 drv->driver.name = drv->name;
150 drv->driver.bus = &of_platform_bus_type;
151 drv->driver.probe = of_device_probe;
152 drv->driver.remove = of_device_remove;
153
154 /* register with core */
155 count = driver_register(&drv->driver);
156 return count ? count : 1;
157}
158
159void of_unregister_driver(struct of_platform_driver *drv)
160{
161 driver_unregister(&drv->driver);
162}
163
164
165static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf)
166{
167 struct of_device *ofdev;
168
169 ofdev = to_of_device(dev);
170 return sprintf(buf, "%s", ofdev->node->full_name);
171}
172
173static DEVICE_ATTR(devspec, S_IRUGO, dev_show_devspec, NULL);
174
175/**
176 * of_release_dev - free an of device structure when all users of it are finished.
177 * @dev: device that's been disconnected
178 *
179 * Will be called only by the device core when all users of this of device are
180 * done.
181 */
182void of_release_dev(struct device *dev)
183{
184 struct of_device *ofdev;
185
186 ofdev = to_of_device(dev);
187 of_node_put(ofdev->node);
188 kfree(ofdev);
189}
190
191int of_device_register(struct of_device *ofdev)
192{
193 int rc;
194 struct of_device **odprop;
195
196 BUG_ON(ofdev->node == NULL);
197
198 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
199 if (!odprop) {
200 struct property *new_prop;
201
202 new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *),
203 GFP_KERNEL);
204 if (new_prop == NULL)
205 return -ENOMEM;
206 new_prop->name = "linux,device";
207 new_prop->length = sizeof(sizeof(struct of_device *));
208 new_prop->value = (unsigned char *)&new_prop[1];
209 odprop = (struct of_device **)new_prop->value;
210 *odprop = NULL;
211 prom_add_property(ofdev->node, new_prop);
212 }
213 *odprop = ofdev;
214
215 rc = device_register(&ofdev->dev);
216 if (rc)
217 return rc;
218
219 device_create_file(&ofdev->dev, &dev_attr_devspec);
220
221 return 0;
222}
223
224void of_device_unregister(struct of_device *ofdev)
225{
226 struct of_device **odprop;
227
228 device_remove_file(&ofdev->dev, &dev_attr_devspec);
229
230 odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL);
231 if (odprop)
232 *odprop = NULL;
233
234 device_unregister(&ofdev->dev);
235}
236
237struct of_device* of_platform_device_create(struct device_node *np,
238 const char *bus_id,
239 struct device *parent)
240{
241 struct of_device *dev;
242 u32 *reg;
243
244 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
245 if (!dev)
246 return NULL;
247 memset(dev, 0, sizeof(*dev));
248
249 dev->node = of_node_get(np);
250 dev->dma_mask = 0xffffffffUL;
251 dev->dev.dma_mask = &dev->dma_mask;
252 dev->dev.parent = parent;
253 dev->dev.bus = &of_platform_bus_type;
254 dev->dev.release = of_release_dev;
255
256 reg = (u32 *)get_property(np, "reg", NULL);
257 strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
258
259 if (of_device_register(dev) != 0) {
260 kfree(dev);
261 return NULL;
262 }
263
264 return dev;
265}
266
267EXPORT_SYMBOL(of_match_device);
268EXPORT_SYMBOL(of_platform_bus_type);
269EXPORT_SYMBOL(of_register_driver);
270EXPORT_SYMBOL(of_unregister_driver);
271EXPORT_SYMBOL(of_device_register);
272EXPORT_SYMBOL(of_device_unregister);
273EXPORT_SYMBOL(of_dev_get);
274EXPORT_SYMBOL(of_dev_put);
275EXPORT_SYMBOL(of_platform_device_create);
276EXPORT_SYMBOL(of_release_dev);
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 1cf5de21a3fd..894779712b46 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -23,6 +23,7 @@
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/open_pic.h> 24#include <asm/open_pic.h>
25#include <asm/i8259.h> 25#include <asm/i8259.h>
26#include <asm/machdep.h>
26 27
27#include "open_pic_defs.h" 28#include "open_pic_defs.h"
28 29
@@ -889,7 +890,7 @@ openpic_get_irq(struct pt_regs *regs)
889 890
890#ifdef CONFIG_SMP 891#ifdef CONFIG_SMP
891void 892void
892smp_openpic_message_pass(int target, int msg, unsigned long data, int wait) 893smp_openpic_message_pass(int target, int msg)
893{ 894{
894 cpumask_t mask = CPU_MASK_ALL; 895 cpumask_t mask = CPU_MASK_ALL;
895 /* make sure we're sending something that translates to an IPI */ 896 /* make sure we're sending something that translates to an IPI */
diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c
index 16cff91d9f41..1c40049b9a45 100644
--- a/arch/ppc/syslib/open_pic2.c
+++ b/arch/ppc/syslib/open_pic2.c
@@ -27,6 +27,7 @@
27#include <asm/sections.h> 27#include <asm/sections.h>
28#include <asm/open_pic.h> 28#include <asm/open_pic.h>
29#include <asm/i8259.h> 29#include <asm/i8259.h>
30#include <asm/machdep.h>
30 31
31#include "open_pic_defs.h" 32#include "open_pic_defs.h"
32 33
diff --git a/arch/ppc/syslib/ppc403_pic.c b/arch/ppc/syslib/ppc403_pic.c
index ce4d1deb86e9..c46043c47225 100644
--- a/arch/ppc/syslib/ppc403_pic.c
+++ b/arch/ppc/syslib/ppc403_pic.c
@@ -26,6 +26,7 @@
26#include <asm/system.h> 26#include <asm/system.h>
27#include <asm/irq.h> 27#include <asm/irq.h>
28#include <asm/ppc4xx_pic.h> 28#include <asm/ppc4xx_pic.h>
29#include <asm/machdep.h>
29 30
30/* Function Prototypes */ 31/* Function Prototypes */
31 32
diff --git a/arch/ppc/syslib/ppc4xx_pic.c b/arch/ppc/syslib/ppc4xx_pic.c
index 40086212b9c3..0b435633a0d1 100644
--- a/arch/ppc/syslib/ppc4xx_pic.c
+++ b/arch/ppc/syslib/ppc4xx_pic.c
@@ -25,6 +25,7 @@
25#include <asm/system.h> 25#include <asm/system.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <asm/ppc4xx_pic.h> 27#include <asm/ppc4xx_pic.h>
28#include <asm/machdep.h>
28 29
29/* See comment in include/arch-ppc/ppc4xx_pic.h 30/* See comment in include/arch-ppc/ppc4xx_pic.h
30 * for more info about these two variables 31 * for more info about these two variables
diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c
index bf83240689dc..e83a83fd95e1 100644
--- a/arch/ppc/syslib/ppc4xx_setup.c
+++ b/arch/ppc/syslib/ppc4xx_setup.c
@@ -278,7 +278,7 @@ ppc4xx_init(unsigned long r3, unsigned long r4, unsigned long r5,
278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */ 278#endif /* defined(CONFIG_PCI) && defined(CONFIG_IDE) */
279} 279}
280 280
281/* Called from MachineCheckException */ 281/* Called from machine_check_exception */
282void platform_machine_check(struct pt_regs *regs) 282void platform_machine_check(struct pt_regs *regs)
283{ 283{
284#if defined(DCRN_PLB0_BEAR) 284#if defined(DCRN_PLB0_BEAR)
diff --git a/arch/ppc/syslib/ppc83xx_setup.c b/arch/ppc/syslib/ppc83xx_setup.c
index 890484e576e7..4da168a6ad03 100644
--- a/arch/ppc/syslib/ppc83xx_setup.c
+++ b/arch/ppc/syslib/ppc83xx_setup.c
@@ -40,6 +40,7 @@
40#include <asm/ppc_sys.h> 40#include <asm/ppc_sys.h>
41#include <asm/kgdb.h> 41#include <asm/kgdb.h>
42#include <asm/delay.h> 42#include <asm/delay.h>
43#include <asm/machdep.h>
43 44
44#include <syslib/ppc83xx_setup.h> 45#include <syslib/ppc83xx_setup.h>
45#if defined(CONFIG_PCI) 46#if defined(CONFIG_PCI)
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
index 832b8bf99ae7..de2f90576577 100644
--- a/arch/ppc/syslib/ppc85xx_setup.c
+++ b/arch/ppc/syslib/ppc85xx_setup.c
@@ -29,6 +29,7 @@
29#include <asm/mmu.h> 29#include <asm/mmu.h>
30#include <asm/ppc_sys.h> 30#include <asm/ppc_sys.h>
31#include <asm/kgdb.h> 31#include <asm/kgdb.h>
32#include <asm/machdep.h>
32 33
33#include <syslib/ppc85xx_setup.h> 34#include <syslib/ppc85xx_setup.h>
34 35
diff --git a/arch/ppc/syslib/ppc8xx_pic.c b/arch/ppc/syslib/ppc8xx_pic.c
index d3b01c6c97de..3e6f51a61d46 100644
--- a/arch/ppc/syslib/ppc8xx_pic.c
+++ b/arch/ppc/syslib/ppc8xx_pic.c
@@ -6,6 +6,7 @@
6#include <linux/signal.h> 6#include <linux/signal.h>
7#include <linux/interrupt.h> 7#include <linux/interrupt.h>
8#include <asm/irq.h> 8#include <asm/irq.h>
9#include <asm/io.h>
9#include <asm/8xx_immap.h> 10#include <asm/8xx_immap.h>
10#include <asm/mpc8xx.h> 11#include <asm/mpc8xx.h>
11#include "ppc8xx_pic.h" 12#include "ppc8xx_pic.h"
@@ -29,8 +30,7 @@ static void m8xx_mask_irq(unsigned int irq_nr)
29 word = irq_nr >> 5; 30 word = irq_nr >> 5;
30 31
31 ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); 32 ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
32 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 33 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
33 ppc_cached_irq_mask[word];
34} 34}
35 35
36static void m8xx_unmask_irq(unsigned int irq_nr) 36static void m8xx_unmask_irq(unsigned int irq_nr)
@@ -41,8 +41,7 @@ static void m8xx_unmask_irq(unsigned int irq_nr)
41 word = irq_nr >> 5; 41 word = irq_nr >> 5;
42 42
43 ppc_cached_irq_mask[word] |= (1 << (31-bit)); 43 ppc_cached_irq_mask[word] |= (1 << (31-bit));
44 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 44 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
45 ppc_cached_irq_mask[word];
46} 45}
47 46
48static void m8xx_end_irq(unsigned int irq_nr) 47static void m8xx_end_irq(unsigned int irq_nr)
@@ -55,8 +54,7 @@ static void m8xx_end_irq(unsigned int irq_nr)
55 word = irq_nr >> 5; 54 word = irq_nr >> 5;
56 55
57 ppc_cached_irq_mask[word] |= (1 << (31-bit)); 56 ppc_cached_irq_mask[word] |= (1 << (31-bit));
58 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 57 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
59 ppc_cached_irq_mask[word];
60 } 58 }
61} 59}
62 60
@@ -69,9 +67,8 @@ static void m8xx_mask_and_ack(unsigned int irq_nr)
69 word = irq_nr >> 5; 67 word = irq_nr >> 5;
70 68
71 ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); 69 ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
72 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask = 70 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_simask, ppc_cached_irq_mask[word]);
73 ppc_cached_irq_mask[word]; 71 out_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend, 1 << (31-bit));
74 ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sipend = 1 << (31-bit);
75} 72}
76 73
77struct hw_interrupt_type ppc8xx_pic = { 74struct hw_interrupt_type ppc8xx_pic = {
@@ -93,7 +90,7 @@ m8xx_get_irq(struct pt_regs *regs)
93 /* For MPC8xx, read the SIVEC register and shift the bits down 90 /* For MPC8xx, read the SIVEC register and shift the bits down
94 * to get the irq number. 91 * to get the irq number.
95 */ 92 */
96 irq = ((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec >> 26; 93 irq = in_be32(&((immap_t *)IMAP_ADDR)->im_siu_conf.sc_sivec) >> 26;
97 94
98 /* 95 /*
99 * When we read the sivec without an interrupt to process, we will 96 * When we read the sivec without an interrupt to process, we will
diff --git a/arch/ppc/syslib/ppc_sys.c b/arch/ppc/syslib/ppc_sys.c
index 52ba0c68078d..62ee86e80711 100644
--- a/arch/ppc/syslib/ppc_sys.c
+++ b/arch/ppc/syslib/ppc_sys.c
@@ -69,6 +69,9 @@ static int __init find_chip_by_name_and_id(char *name, u32 id)
69 matched[j++] = i; 69 matched[j++] = i;
70 i++; 70 i++;
71 } 71 }
72
73 ret = i;
74
72 if (j != 0) { 75 if (j != 0) {
73 for (i = 0; i < j; i++) { 76 for (i = 0; i < j; i++) {
74 if ((ppc_sys_specs[matched[i]].mask & id) == 77 if ((ppc_sys_specs[matched[i]].mask & id) ==
diff --git a/arch/ppc/syslib/pq2_devices.c b/arch/ppc/syslib/pq2_devices.c
index 1d3869768f96..6f88ba93412b 100644
--- a/arch/ppc/syslib/pq2_devices.c
+++ b/arch/ppc/syslib/pq2_devices.c
@@ -18,6 +18,7 @@
18#include <asm/cpm2.h> 18#include <asm/cpm2.h>
19#include <asm/irq.h> 19#include <asm/irq.h>
20#include <asm/ppc_sys.h> 20#include <asm/ppc_sys.h>
21#include <asm/machdep.h>
21 22
22struct platform_device ppc_sys_platform_devices[] = { 23struct platform_device ppc_sys_platform_devices[] = {
23 [MPC82xx_CPM_FCC1] = { 24 [MPC82xx_CPM_FCC1] = {
diff --git a/arch/ppc/syslib/prep_nvram.c b/arch/ppc/syslib/prep_nvram.c
index 8599850ca772..2c6364d9641f 100644
--- a/arch/ppc/syslib/prep_nvram.c
+++ b/arch/ppc/syslib/prep_nvram.c
@@ -22,14 +22,14 @@
22static char nvramData[MAX_PREP_NVRAM]; 22static char nvramData[MAX_PREP_NVRAM];
23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0]; 23static NVRAM_MAP *nvram=(NVRAM_MAP *)&nvramData[0];
24 24
25unsigned char __prep prep_nvram_read_val(int addr) 25unsigned char prep_nvram_read_val(int addr)
26{ 26{
27 outb(addr, PREP_NVRAM_AS0); 27 outb(addr, PREP_NVRAM_AS0);
28 outb(addr>>8, PREP_NVRAM_AS1); 28 outb(addr>>8, PREP_NVRAM_AS1);
29 return inb(PREP_NVRAM_DATA); 29 return inb(PREP_NVRAM_DATA);
30} 30}
31 31
32void __prep prep_nvram_write_val(int addr, 32void prep_nvram_write_val(int addr,
33 unsigned char val) 33 unsigned char val)
34{ 34{
35 outb(addr, PREP_NVRAM_AS0); 35 outb(addr, PREP_NVRAM_AS0);
@@ -81,8 +81,7 @@ void __init init_prep_nvram(void)
81 } 81 }
82} 82}
83 83
84__prep 84char *prep_nvram_get_var(const char *name)
85char __prep *prep_nvram_get_var(const char *name)
86{ 85{
87 char *cp; 86 char *cp;
88 int namelen; 87 int namelen;
@@ -101,8 +100,7 @@ char __prep *prep_nvram_get_var(const char *name)
101 return NULL; 100 return NULL;
102} 101}
103 102
104__prep 103char *prep_nvram_first_var(void)
105char __prep *prep_nvram_first_var(void)
106{ 104{
107 if (nvram->Header.GELength == 0) { 105 if (nvram->Header.GELength == 0) {
108 return NULL; 106 return NULL;
@@ -112,8 +110,7 @@ char __prep *prep_nvram_first_var(void)
112 } 110 }
113} 111}
114 112
115__prep 113char *prep_nvram_next_var(char *name)
116char __prep *prep_nvram_next_var(char *name)
117{ 114{
118 char *cp; 115 char *cp;
119 116
diff --git a/arch/ppc/syslib/prom.c b/arch/ppc/syslib/prom.c
index 2c64ed627475..278da6ee62ea 100644
--- a/arch/ppc/syslib/prom.c
+++ b/arch/ppc/syslib/prom.c
@@ -89,7 +89,7 @@ extern char cmd_line[512]; /* XXX */
89extern boot_infos_t *boot_infos; 89extern boot_infos_t *boot_infos;
90unsigned long dev_tree_size; 90unsigned long dev_tree_size;
91 91
92void __openfirmware 92void
93phys_call_rtas(int service, int nargs, int nret, ...) 93phys_call_rtas(int service, int nargs, int nret, ...)
94{ 94{
95 va_list list; 95 va_list list;
@@ -862,7 +862,7 @@ find_type_devices(const char *type)
862/* 862/*
863 * Returns all nodes linked together 863 * Returns all nodes linked together
864 */ 864 */
865struct device_node * __openfirmware 865struct device_node *
866find_all_nodes(void) 866find_all_nodes(void)
867{ 867{
868 struct device_node *head, **prevp, *np; 868 struct device_node *head, **prevp, *np;
@@ -1165,7 +1165,7 @@ get_property(struct device_node *np, const char *name, int *lenp)
1165/* 1165/*
1166 * Add a property to a node 1166 * Add a property to a node
1167 */ 1167 */
1168void __openfirmware 1168void
1169prom_add_property(struct device_node* np, struct property* prop) 1169prom_add_property(struct device_node* np, struct property* prop)
1170{ 1170{
1171 struct property **next = &np->properties; 1171 struct property **next = &np->properties;
@@ -1177,7 +1177,7 @@ prom_add_property(struct device_node* np, struct property* prop)
1177} 1177}
1178 1178
1179/* I quickly hacked that one, check against spec ! */ 1179/* I quickly hacked that one, check against spec ! */
1180static inline unsigned long __openfirmware 1180static inline unsigned long
1181bus_space_to_resource_flags(unsigned int bus_space) 1181bus_space_to_resource_flags(unsigned int bus_space)
1182{ 1182{
1183 u8 space = (bus_space >> 24) & 0xf; 1183 u8 space = (bus_space >> 24) & 0xf;
@@ -1194,7 +1194,7 @@ bus_space_to_resource_flags(unsigned int bus_space)
1194 } 1194 }
1195} 1195}
1196 1196
1197static struct resource* __openfirmware 1197static struct resource*
1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range) 1198find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1199{ 1199{
1200 unsigned long mask; 1200 unsigned long mask;
@@ -1224,7 +1224,7 @@ find_parent_pci_resource(struct pci_dev* pdev, struct address_range *range)
1224 * or other nodes attached to the root node. Ultimately, put some 1224 * or other nodes attached to the root node. Ultimately, put some
1225 * link to resources in the OF node. 1225 * link to resources in the OF node.
1226 */ 1226 */
1227struct resource* __openfirmware 1227struct resource*
1228request_OF_resource(struct device_node* node, int index, const char* name_postfix) 1228request_OF_resource(struct device_node* node, int index, const char* name_postfix)
1229{ 1229{
1230 struct pci_dev* pcidev; 1230 struct pci_dev* pcidev;
@@ -1280,7 +1280,7 @@ fail:
1280 return NULL; 1280 return NULL;
1281} 1281}
1282 1282
1283int __openfirmware 1283int
1284release_OF_resource(struct device_node* node, int index) 1284release_OF_resource(struct device_node* node, int index)
1285{ 1285{
1286 struct pci_dev* pcidev; 1286 struct pci_dev* pcidev;
@@ -1346,7 +1346,7 @@ release_OF_resource(struct device_node* node, int index)
1346} 1346}
1347 1347
1348#if 0 1348#if 0
1349void __openfirmware 1349void
1350print_properties(struct device_node *np) 1350print_properties(struct device_node *np)
1351{ 1351{
1352 struct property *pp; 1352 struct property *pp;
@@ -1400,7 +1400,7 @@ print_properties(struct device_node *np)
1400static DEFINE_SPINLOCK(rtas_lock); 1400static DEFINE_SPINLOCK(rtas_lock);
1401 1401
1402/* this can be called after setup -- Cort */ 1402/* this can be called after setup -- Cort */
1403int __openfirmware 1403int
1404call_rtas(const char *service, int nargs, int nret, 1404call_rtas(const char *service, int nargs, int nret,
1405 unsigned long *outputs, ...) 1405 unsigned long *outputs, ...)
1406{ 1406{
diff --git a/arch/ppc/syslib/xilinx_pic.c b/arch/ppc/syslib/xilinx_pic.c
index 2cbcad278cef..47f04c71fe9c 100644
--- a/arch/ppc/syslib/xilinx_pic.c
+++ b/arch/ppc/syslib/xilinx_pic.c
@@ -17,6 +17,7 @@
17#include <asm/io.h> 17#include <asm/io.h>
18#include <asm/xparameters.h> 18#include <asm/xparameters.h>
19#include <asm/ibm4xx.h> 19#include <asm/ibm4xx.h>
20#include <asm/machdep.h>
20 21
21/* No one else should require these constants, so define them locally here. */ 22/* No one else should require these constants, so define them locally here. */
22#define ISR 0 /* Interrupt Status Register */ 23#define ISR 0 /* Interrupt Status Register */
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index 507d4eeffe07..98612d420346 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -478,8 +478,9 @@ void *xmon_stdout;
478void *xmon_stderr; 478void *xmon_stderr;
479 479
480void 480void
481xmon_init(void) 481xmon_init(int arg)
482{ 482{
483 xmon_map_scc();
483} 484}
484 485
485int 486int
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index be7869e39465..66bfaa3211a2 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -148,9 +148,14 @@ Commands:\n\
148 r print registers\n\ 148 r print registers\n\
149 S print special registers\n\ 149 S print special registers\n\
150 t print backtrace\n\ 150 t print backtrace\n\
151 la lookup address in system.map\n\ 151 la lookup address\n\
152 ls lookup symbol in system.map\n\ 152 ls lookup symbol\n\
153 C checksum\n\
154 p call function with arguments\n\
155 T print time\n\
153 x exit monitor\n\ 156 x exit monitor\n\
157 zr reboot\n\
158 zh halt\n\
154"; 159";
155 160
156static int xmon_trace[NR_CPUS]; 161static int xmon_trace[NR_CPUS];
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index c658650af429..42677cc96508 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -10,6 +10,9 @@ config MMU
10 bool 10 bool
11 default y 11 default y
12 12
13config PPC_STD_MMU
14 def_bool y
15
13config UID16 16config UID16
14 bool 17 bool
15 18
@@ -120,6 +123,11 @@ config MPIC
120 bool 123 bool
121 default y 124 default y
122 125
126config PPC_I8259
127 depends on PPC_PSERIES
128 bool
129 default y
130
123config BPA_IIC 131config BPA_IIC
124 depends on PPC_BPA 132 depends on PPC_BPA
125 bool 133 bool
@@ -186,6 +194,12 @@ config BOOTX_TEXT
186 Say Y here to see progress messages from the boot firmware in text 194 Say Y here to see progress messages from the boot firmware in text
187 mode. Requires an Open Firmware compatible video card. 195 mode. Requires an Open Firmware compatible video card.
188 196
197config POWER4
198 def_bool y
199
200config PPC_FPU
201 def_bool y
202
189config POWER4_ONLY 203config POWER4_ONLY
190 bool "Optimize for POWER4" 204 bool "Optimize for POWER4"
191 default n 205 default n
@@ -234,6 +248,10 @@ config HMT
234 This option enables hardware multithreading on RS64 cpus. 248 This option enables hardware multithreading on RS64 cpus.
235 pSeries systems p620 and p660 have such a cpu type. 249 pSeries systems p620 and p660 have such a cpu type.
236 250
251config NUMA
252 bool "NUMA support"
253 default y if SMP && PPC_PSERIES
254
237config ARCH_SELECT_MEMORY_MODEL 255config ARCH_SELECT_MEMORY_MODEL
238 def_bool y 256 def_bool y
239 257
@@ -249,9 +267,6 @@ config ARCH_DISCONTIGMEM_DEFAULT
249 def_bool y 267 def_bool y
250 depends on ARCH_DISCONTIGMEM_ENABLE 268 depends on ARCH_DISCONTIGMEM_ENABLE
251 269
252config ARCH_FLATMEM_ENABLE
253 def_bool y
254
255config ARCH_SPARSEMEM_ENABLE 270config ARCH_SPARSEMEM_ENABLE
256 def_bool y 271 def_bool y
257 depends on ARCH_DISCONTIGMEM_ENABLE 272 depends on ARCH_DISCONTIGMEM_ENABLE
@@ -274,10 +289,6 @@ config NODES_SPAN_OTHER_NODES
274 def_bool y 289 def_bool y
275 depends on NEED_MULTIPLE_NODES 290 depends on NEED_MULTIPLE_NODES
276 291
277config NUMA
278 bool "NUMA support"
279 default y if DISCONTIGMEM || SPARSEMEM
280
281config SCHED_SMT 292config SCHED_SMT
282 bool "SMT (Hyperthreading) scheduler support" 293 bool "SMT (Hyperthreading) scheduler support"
283 depends on SMP 294 depends on SMP
@@ -307,6 +318,11 @@ config PPC_RTAS
307 depends on PPC_PSERIES || PPC_BPA 318 depends on PPC_PSERIES || PPC_BPA
308 default y 319 default y
309 320
321config RTAS_ERROR_LOGGING
322 bool
323 depends on PPC_RTAS
324 default y
325
310config RTAS_PROC 326config RTAS_PROC
311 bool "Proc interface to RTAS" 327 bool "Proc interface to RTAS"
312 depends on PPC_RTAS 328 depends on PPC_RTAS
@@ -357,7 +373,6 @@ config HOTPLUG_CPU
357 373
358config PROC_DEVICETREE 374config PROC_DEVICETREE
359 bool "Support for Open Firmware device tree in /proc" 375 bool "Support for Open Firmware device tree in /proc"
360 depends on !PPC_ISERIES
361 help 376 help
362 This option adds a device-tree directory under /proc which contains 377 This option adds a device-tree directory under /proc which contains
363 an image of the device tree that the kernel copies from Open 378 an image of the device tree that the kernel copies from Open
@@ -461,7 +476,7 @@ config VIOPATH
461 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH 476 depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
462 default y 477 default y
463 478
464source "arch/ppc64/oprofile/Kconfig" 479source "arch/powerpc/oprofile/Kconfig"
465 480
466source "arch/ppc64/Kconfig.debug" 481source "arch/ppc64/Kconfig.debug"
467 482
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 521c2a5a2862..fdbd6f44adc0 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -75,17 +75,25 @@ else
75 CFLAGS += $(call cc-option,-mtune=power4) 75 CFLAGS += $(call cc-option,-mtune=power4)
76endif 76endif
77 77
78# No AltiVec instruction when building kernel
79CFLAGS += $(call cc-option, -mno-altivec)
80
78# Enable unit-at-a-time mode when possible. It shrinks the 81# Enable unit-at-a-time mode when possible. It shrinks the
79# kernel considerably. 82# kernel considerably.
80CFLAGS += $(call cc-option,-funit-at-a-time) 83CFLAGS += $(call cc-option,-funit-at-a-time)
81 84
82head-y := arch/ppc64/kernel/head.o 85head-y := arch/ppc64/kernel/head.o
86head-y += arch/powerpc/kernel/fpu.o
87head-y += arch/powerpc/kernel/entry_64.o
83 88
84libs-y += arch/ppc64/lib/ 89libs-y += arch/ppc64/lib/
85core-y += arch/ppc64/kernel/ 90core-y += arch/ppc64/kernel/ arch/powerpc/kernel/
86core-y += arch/ppc64/mm/ 91core-y += arch/powerpc/mm/
87core-$(CONFIG_XMON) += arch/ppc64/xmon/ 92core-y += arch/powerpc/sysdev/
88drivers-$(CONFIG_OPROFILE) += arch/ppc64/oprofile/ 93core-y += arch/powerpc/platforms/
94core-y += arch/powerpc/lib/
95core-$(CONFIG_XMON) += arch/powerpc/xmon/
96drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
89 97
90boot := arch/ppc64/boot 98boot := arch/ppc64/boot
91 99
@@ -100,7 +108,7 @@ $(boottargets-y): vmlinux
100bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage 108bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage
101bootimage-$(CONFIG_PPC_PMAC) := vmlinux 109bootimage-$(CONFIG_PPC_PMAC) := vmlinux
102bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage 110bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage
103bootimage-$(CONFIG_PPC_BPA) := zImage 111bootimage-$(CONFIG_PPC_BPA) := $(boot)/zImage
104bootimage-$(CONFIG_PPC_ISERIES) := vmlinux 112bootimage-$(CONFIG_PPC_ISERIES) := vmlinux
105BOOTIMAGE := $(bootimage-y) 113BOOTIMAGE := $(bootimage-y)
106install: vmlinux 114install: vmlinux
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index 33fdc8710891..301bc1536c49 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -22,15 +22,46 @@
22 22
23 23
24HOSTCC := gcc 24HOSTCC := gcc
25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include) 25BOOTCFLAGS := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem $(shell $(CROSS32CC) -print-file-name=include) -fPIC
26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc 26BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
27BOOTLFLAGS := -Ttext 0x00400000 -e _start -T $(srctree)/$(src)/zImage.lds 27BOOTLFLAGS := -T $(srctree)/$(src)/zImage.lds
28OBJCOPYFLAGS := contents,alloc,load,readonly,data 28OBJCOPYFLAGS := contents,alloc,load,readonly,data
29 29
30src-boot := crt0.S string.S prom.c main.c zlib.c imagesize.c div64.S 30zlib := infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c
31zlibheader := infblock.h infcodes.h inffast.h inftrees.h infutil.h
32zliblinuxheader := zlib.h zconf.h zutil.h
33
34$(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
35#$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
36
37src-boot := string.S prom.c main.c div64.S crt0.S
38src-boot += $(zlib)
31src-boot := $(addprefix $(obj)/, $(src-boot)) 39src-boot := $(addprefix $(obj)/, $(src-boot))
32obj-boot := $(addsuffix .o, $(basename $(src-boot))) 40obj-boot := $(addsuffix .o, $(basename $(src-boot)))
33 41
42BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
43
44quiet_cmd_copy_zlib = COPY $@
45 cmd_copy_zlib = sed "s@__attribute_used__@@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
46
47quiet_cmd_copy_zlibheader = COPY $@
48 cmd_copy_zlibheader = sed "s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
49# stddef.h for NULL
50quiet_cmd_copy_zliblinuxheader = COPY $@
51 cmd_copy_zliblinuxheader = sed "s@<linux/string.h>@\"string.h\"@;s@<linux/kernel.h>@<stddef.h>@;s@<linux/\([^>]\+\).*@\"\1\"@" $< > $@
52
53$(addprefix $(obj)/,$(zlib)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
54 $(call cmd,copy_zlib)
55
56$(addprefix $(obj)/,$(zlibheader)): $(obj)/%: $(srctree)/lib/zlib_inflate/%
57 $(call cmd,copy_zlibheader)
58
59$(addprefix $(obj)/,$(zliblinuxheader)): $(obj)/%: $(srctree)/include/linux/%
60 $(call cmd,copy_zliblinuxheader)
61
62clean-files := $(zlib) $(zlibheader) $(zliblinuxheader)
63
64
34quiet_cmd_bootcc = BOOTCC $@ 65quiet_cmd_bootcc = BOOTCC $@
35 cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $< 66 cmd_bootcc = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
36 67
@@ -56,7 +87,7 @@ src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
56gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section))) 87gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
57 88
58hostprogs-y := addnote addRamDisk 89hostprogs-y := addnote addRamDisk
59targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd imagesize.c \ 90targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
60 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \ 91 $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
61 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \ 92 $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
62 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \ 93 $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
@@ -69,9 +100,9 @@ quiet_cmd_ramdisk = RAMDISK $@
69quiet_cmd_stripvm = STRIP $@ 100quiet_cmd_stripvm = STRIP $@
70 cmd_stripvm = $(STRIP) -s $< -o $@ 101 cmd_stripvm = $(STRIP) -s $< -o $@
71 102
72vmlinux.strip: vmlinux FORCE 103vmlinux.strip: vmlinux
73 $(call if_changed,stripvm) 104 $(call if_changed,stripvm)
74$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz FORCE 105$(obj)/vmlinux.initrd: vmlinux.strip $(obj)/addRamDisk $(obj)/ramdisk.image.gz
75 $(call if_changed,ramdisk) 106 $(call if_changed,ramdisk)
76 107
77quiet_cmd_addsection = ADDSEC $@ 108quiet_cmd_addsection = ADDSEC $@
@@ -79,48 +110,38 @@ quiet_cmd_addsection = ADDSEC $@
79 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \ 110 --add-section=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(patsubst %.o,%.gz, $@) \
80 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS) 111 --set-section-flags=.kernel:$(strip $(patsubst $(obj)/kernel-%.o,%, $@))=$(OBJCOPYFLAGS)
81 112
82quiet_cmd_imagesize = GENSIZE $@
83 cmd_imagesize = ls -l vmlinux.strip | \
84 awk '{printf "/* generated -- do not edit! */\n" "unsigned long vmlinux_filesize = %d;\n", $$5}' \
85 > $(obj)/imagesize.c && \
86 $(CROSS_COMPILE)nm -n vmlinux | tail -n 1 | \
87 awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' >> $(obj)/imagesize.c
88
89quiet_cmd_addnote = ADDNOTE $@ 113quiet_cmd_addnote = ADDNOTE $@
90 cmd_addnote = $(obj)/addnote $@ 114 cmd_addnote = $(obj)/addnote $@
91 115
92$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE 116$(call gz-sec, $(required)): $(obj)/kernel-%.gz: %
93 $(call if_changed,gzip) 117 $(call if_changed,gzip)
94 118
95$(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz 119$(obj)/kernel-initrd.gz: $(obj)/ramdisk.image.gz
96 cp -f $(obj)/ramdisk.image.gz $@ 120 cp -f $(obj)/ramdisk.image.gz $@
97 121
98$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz FORCE 122$(call src-sec, $(required) $(initrd)): $(obj)/kernel-%.c: $(obj)/kernel-%.gz
99 @touch $@ 123 @touch $@
100 124
101$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c FORCE 125$(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c
102 $(call if_changed_dep,bootcc) 126 $(call if_changed_dep,bootcc)
103 $(call cmd,addsection) 127 $(call cmd,addsection)
104 128
105$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required)) 129$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
106$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) FORCE 130$(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.lds
107 $(call cmd,bootld,$(obj-boot)) 131 $(call cmd,bootld,$(obj-boot))
108 132
109$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd)) 133$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
110$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) FORCE 134$(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(srctree)/$(src)/zImage.lds
111 $(call cmd,bootld,$(obj-boot)) 135 $(call cmd,bootld,$(obj-boot))
112 136
113$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote FORCE 137$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote
114 @cp -f $< $@ 138 @cp -f $< $@
115 $(call if_changed,addnote) 139 $(call if_changed,addnote)
116 140
117$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote FORCE 141$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
118 @cp -f $< $@ 142 @cp -f $< $@
119 $(call if_changed,addnote) 143 $(call if_changed,addnote)
120 144
121$(obj)/imagesize.c: vmlinux.strip
122 $(call cmd,imagesize)
123
124install: $(CONFIGURE) $(BOOTIMAGE) 145install: $(CONFIGURE) $(BOOTIMAGE)
125 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)" 146 sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
126 147
diff --git a/arch/ppc64/boot/crt0.S b/arch/ppc64/boot/crt0.S
index 3861e7f9cf19..9cc442263939 100644
--- a/arch/ppc64/boot/crt0.S
+++ b/arch/ppc64/boot/crt0.S
@@ -12,11 +12,40 @@
12#include "ppc_asm.h" 12#include "ppc_asm.h"
13 13
14 .text 14 .text
15 .globl _start 15 .globl _zimage_start
16_start: 16_zimage_start:
17 bl reloc_offset
18
19reloc_offset:
20 mflr r0
21 lis r9,reloc_offset@ha
22 addi r9,r9,reloc_offset@l
23 subf. r0,r9,r0
24 beq clear_caches
25
26reloc_got2:
27 lis r9,__got2_start@ha
28 addi r9,r9,__got2_start@l
29 lis r8,__got2_end@ha
30 addi r8,r8,__got2_end@l
31 subf. r8,r9,r8
32 beq clear_caches
33 srwi. r8,r8,2
34 mtctr r8
35 add r9,r0,r9
36reloc_got2_loop:
37 lwz r8,0(r9)
38 add r8,r8,r0
39 stw r8,0(r9)
40 addi r9,r9,4
41 bdnz reloc_got2_loop
42
43clear_caches:
17 lis r9,_start@h 44 lis r9,_start@h
45 add r9,r0,r9
18 lis r8,_etext@ha 46 lis r8,_etext@ha
19 addi r8,r8,_etext@l 47 addi r8,r8,_etext@l
48 add r8,r0,r8
201: dcbf r0,r9 491: dcbf r0,r9
21 icbi r0,r9 50 icbi r0,r9
22 addi r9,r9,0x20 51 addi r9,r9,0x20
@@ -25,24 +54,6 @@ _start:
25 sync 54 sync
26 isync 55 isync
27 56
28 ## Clear out the BSS as per ANSI C requirements 57 mr r6,r1
29
30 lis r7,_end@ha
31 addi r7,r7,_end@l # r7 = &_end
32 lis r8,__bss_start@ha #
33 addi r8,r8,__bss_start@l # r8 = &_bss_start
34
35 ## Determine how large an area, in number of words, to clear
36
37 subf r7,r8,r7 # r7 = &_end - &_bss_start + 1
38 addi r7,r7,3 # r7 += 3
39 srwi. r7,r7,2 # r7 = size in words.
40 beq 3f # If the size is zero, don't bother
41 addi r8,r8,-4 # r8 -= 4
42 mtctr r7 # SPRN_CTR = number of words to clear
43 li r0,0 # r0 = 0
442: stwu r0,4(r8) # Clear out a word
45 bdnz 2b # Keep clearing until done
463:
47 b start 58 b start
48 59
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index cb2d6626b555..eacce9590816 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -28,7 +28,7 @@ if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}i
28# Default install 28# Default install
29 29
30# this should work for both the pSeries zImage and the iSeries vmlinux.sm 30# this should work for both the pSeries zImage and the iSeries vmlinux.sm
31image_name=`basename $5` 31image_name=`basename $2`
32 32
33if [ -f $4/$image_name ]; then 33if [ -f $4/$image_name ]; then
34 mv $4/$image_name $4/$image_name.old 34 mv $4/$image_name $4/$image_name.old
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index f7ec19a2d0b0..c1dc876bccab 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -17,7 +17,6 @@
17#include "prom.h" 17#include "prom.h"
18#include "zlib.h" 18#include "zlib.h"
19 19
20static void gunzip(void *, int, unsigned char *, int *);
21extern void flush_cache(void *, unsigned long); 20extern void flush_cache(void *, unsigned long);
22 21
23 22
@@ -26,31 +25,26 @@ extern void flush_cache(void *, unsigned long);
26#define RAM_END (512<<20) // Fixme: use OF */ 25#define RAM_END (512<<20) // Fixme: use OF */
27#define ONE_MB 0x100000 26#define ONE_MB 0x100000
28 27
29static char *avail_ram;
30static char *begin_avail, *end_avail;
31static char *avail_high;
32static unsigned int heap_use;
33static unsigned int heap_max;
34
35extern char _start[]; 28extern char _start[];
29extern char __bss_start[];
36extern char _end[]; 30extern char _end[];
37extern char _vmlinux_start[]; 31extern char _vmlinux_start[];
38extern char _vmlinux_end[]; 32extern char _vmlinux_end[];
39extern char _initrd_start[]; 33extern char _initrd_start[];
40extern char _initrd_end[]; 34extern char _initrd_end[];
41extern unsigned long vmlinux_filesize;
42extern unsigned long vmlinux_memsize;
43 35
44struct addr_range { 36struct addr_range {
45 unsigned long addr; 37 unsigned long addr;
46 unsigned long size; 38 unsigned long size;
47 unsigned long memsize; 39 unsigned long memsize;
48}; 40};
49static struct addr_range vmlinux = {0, 0, 0}; 41static struct addr_range vmlinux;
50static struct addr_range vmlinuz = {0, 0, 0}; 42static struct addr_range vmlinuz;
51static struct addr_range initrd = {0, 0, 0}; 43static struct addr_range initrd;
44
45static char scratch[46912]; /* scratch space for gunzip, from zlib_inflate_workspacesize() */
46static char elfheader[256];
52 47
53static char scratch[128<<10]; /* 128kB of scratch space for gunzip */
54 48
55typedef void (*kernel_entry_t)( unsigned long, 49typedef void (*kernel_entry_t)( unsigned long,
56 unsigned long, 50 unsigned long,
@@ -62,6 +56,63 @@ typedef void (*kernel_entry_t)( unsigned long,
62 56
63static unsigned long claim_base; 57static unsigned long claim_base;
64 58
59#define HEAD_CRC 2
60#define EXTRA_FIELD 4
61#define ORIG_NAME 8
62#define COMMENT 0x10
63#define RESERVED 0xe0
64
65static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
66{
67 z_stream s;
68 int r, i, flags;
69
70 /* skip header */
71 i = 10;
72 flags = src[3];
73 if (src[2] != Z_DEFLATED || (flags & RESERVED) != 0) {
74 printf("bad gzipped data\n\r");
75 exit();
76 }
77 if ((flags & EXTRA_FIELD) != 0)
78 i = 12 + src[10] + (src[11] << 8);
79 if ((flags & ORIG_NAME) != 0)
80 while (src[i++] != 0)
81 ;
82 if ((flags & COMMENT) != 0)
83 while (src[i++] != 0)
84 ;
85 if ((flags & HEAD_CRC) != 0)
86 i += 2;
87 if (i >= *lenp) {
88 printf("gunzip: ran out of data in header\n\r");
89 exit();
90 }
91
92 if (zlib_inflate_workspacesize() > sizeof(scratch)) {
93 printf("gunzip needs more mem\n");
94 exit();
95 }
96 memset(&s, 0, sizeof(s));
97 s.workspace = scratch;
98 r = zlib_inflateInit2(&s, -MAX_WBITS);
99 if (r != Z_OK) {
100 printf("inflateInit2 returned %d\n\r", r);
101 exit();
102 }
103 s.next_in = src + i;
104 s.avail_in = *lenp - i;
105 s.next_out = dst;
106 s.avail_out = dstlen;
107 r = zlib_inflate(&s, Z_FULL_FLUSH);
108 if (r != Z_OK && r != Z_STREAM_END) {
109 printf("inflate returned %d msg: %s\n\r", r, s.msg);
110 exit();
111 }
112 *lenp = s.next_out - (unsigned char *) dst;
113 zlib_inflateEnd(&s);
114}
115
65static unsigned long try_claim(unsigned long size) 116static unsigned long try_claim(unsigned long size)
66{ 117{
67 unsigned long addr = 0; 118 unsigned long addr = 0;
@@ -80,13 +131,16 @@ static unsigned long try_claim(unsigned long size)
80 return addr; 131 return addr;
81} 132}
82 133
83void start(unsigned long a1, unsigned long a2, void *promptr) 134void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
84{ 135{
85 unsigned long i; 136 unsigned long i;
137 int len;
86 kernel_entry_t kernel_entry; 138 kernel_entry_t kernel_entry;
87 Elf64_Ehdr *elf64; 139 Elf64_Ehdr *elf64;
88 Elf64_Phdr *elf64ph; 140 Elf64_Phdr *elf64ph;
89 141
142 memset(__bss_start, 0, _end - __bss_start);
143
90 prom = (int (*)(void *)) promptr; 144 prom = (int (*)(void *)) promptr;
91 chosen_handle = finddevice("/chosen"); 145 chosen_handle = finddevice("/chosen");
92 if (chosen_handle == (void *) -1) 146 if (chosen_handle == (void *) -1)
@@ -97,7 +151,7 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
97 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4) 151 if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
98 exit(); 152 exit();
99 153
100 printf("\n\rzImage starting: loaded at 0x%lx\n\r", (unsigned long) _start); 154 printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
101 155
102 /* 156 /*
103 * The first available claim_base must be above the end of the 157 * The first available claim_base must be above the end of the
@@ -118,25 +172,45 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
118 claim_base = PROG_START; 172 claim_base = PROG_START;
119#endif 173#endif
120 174
121 /* 175 vmlinuz.addr = (unsigned long)_vmlinux_start;
122 * Now we try to claim some memory for the kernel itself 176 vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
123 * our "vmlinux_memsize" is the memory footprint in RAM, _HOWEVER_, what 177
124 * our Makefile stuffs in is an image containing all sort of junk including 178 /* gunzip the ELF header of the kernel */
125 * an ELF header. We need to do some calculations here to find the right 179 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
126 * size... In practice we add 1Mb, that is enough, but we should really 180 len = vmlinuz.size;
127 * consider fixing the Makefile to put a _raw_ kernel in there ! 181 gunzip(elfheader, sizeof(elfheader),
128 */ 182 (unsigned char *)vmlinuz.addr, &len);
129 vmlinux_memsize += ONE_MB; 183 } else
130 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux_memsize); 184 memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader));
131 vmlinux.addr = try_claim(vmlinux_memsize); 185
186 elf64 = (Elf64_Ehdr *)elfheader;
187 if ( elf64->e_ident[EI_MAG0] != ELFMAG0 ||
188 elf64->e_ident[EI_MAG1] != ELFMAG1 ||
189 elf64->e_ident[EI_MAG2] != ELFMAG2 ||
190 elf64->e_ident[EI_MAG3] != ELFMAG3 ||
191 elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
192 elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
193 elf64->e_type != ET_EXEC ||
194 elf64->e_machine != EM_PPC64 )
195 {
196 printf("Error: not a valid PPC64 ELF file!\n\r");
197 exit();
198 }
199
200 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
201 (unsigned long)elf64->e_phoff);
202 for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
203 if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
204 break;
205 }
206 vmlinux.size = (unsigned long)elf64ph->p_filesz;
207 vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
208 printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
209 vmlinux.addr = try_claim(vmlinux.memsize);
132 if (vmlinux.addr == 0) { 210 if (vmlinux.addr == 0) {
133 printf("Can't allocate memory for kernel image !\n\r"); 211 printf("Can't allocate memory for kernel image !\n\r");
134 exit(); 212 exit();
135 } 213 }
136 vmlinuz.addr = (unsigned long)_vmlinux_start;
137 vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
138 vmlinux.size = PAGE_ALIGN(vmlinux_filesize);
139 vmlinux.memsize = vmlinux_memsize;
140 214
141 /* 215 /*
142 * Now we try to claim memory for the initrd (and copy it there) 216 * Now we try to claim memory for the initrd (and copy it there)
@@ -160,49 +234,22 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
160 234
161 /* Eventually gunzip the kernel */ 235 /* Eventually gunzip the kernel */
162 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) { 236 if (*(unsigned short *)vmlinuz.addr == 0x1f8b) {
163 int len;
164 avail_ram = scratch;
165 begin_avail = avail_high = avail_ram;
166 end_avail = scratch + sizeof(scratch);
167 printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...", 237 printf("gunzipping (0x%lx <- 0x%lx:0x%0lx)...",
168 vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size); 238 vmlinux.addr, vmlinuz.addr, vmlinuz.addr+vmlinuz.size);
169 len = vmlinuz.size; 239 len = vmlinuz.size;
170 gunzip((void *)vmlinux.addr, vmlinux.size, 240 gunzip((void *)vmlinux.addr, vmlinux.memsize,
171 (unsigned char *)vmlinuz.addr, &len); 241 (unsigned char *)vmlinuz.addr, &len);
172 printf("done 0x%lx bytes\n\r", len); 242 printf("done 0x%lx bytes\n\r", len);
173 printf("0x%x bytes of heap consumed, max in use 0x%x\n\r",
174 (unsigned)(avail_high - begin_avail), heap_max);
175 } else { 243 } else {
176 memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size); 244 memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size);
177 } 245 }
178 246
179 /* Skip over the ELF header */ 247 /* Skip over the ELF header */
180 elf64 = (Elf64_Ehdr *)vmlinux.addr;
181 if ( elf64->e_ident[EI_MAG0] != ELFMAG0 ||
182 elf64->e_ident[EI_MAG1] != ELFMAG1 ||
183 elf64->e_ident[EI_MAG2] != ELFMAG2 ||
184 elf64->e_ident[EI_MAG3] != ELFMAG3 ||
185 elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
186 elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
187 elf64->e_type != ET_EXEC ||
188 elf64->e_machine != EM_PPC64 )
189 {
190 printf("Error: not a valid PPC64 ELF file!\n\r");
191 exit();
192 }
193
194 elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
195 (unsigned long)elf64->e_phoff);
196 for(i=0; i < (unsigned int)elf64->e_phnum ;i++,elf64ph++) {
197 if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
198 break;
199 }
200#ifdef DEBUG 248#ifdef DEBUG
201 printf("... skipping 0x%lx bytes of ELF header\n\r", 249 printf("... skipping 0x%lx bytes of ELF header\n\r",
202 (unsigned long)elf64ph->p_offset); 250 (unsigned long)elf64ph->p_offset);
203#endif 251#endif
204 vmlinux.addr += (unsigned long)elf64ph->p_offset; 252 vmlinux.addr += (unsigned long)elf64ph->p_offset;
205 vmlinux.size -= (unsigned long)elf64ph->p_offset;
206 253
207 flush_cache((void *)vmlinux.addr, vmlinux.size); 254 flush_cache((void *)vmlinux.addr, vmlinux.size);
208 255
@@ -225,108 +272,3 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
225 exit(); 272 exit();
226} 273}
227 274
228struct memchunk {
229 unsigned int size;
230 unsigned int pad;
231 struct memchunk *next;
232};
233
234static struct memchunk *freechunks;
235
236void *zalloc(void *x, unsigned items, unsigned size)
237{
238 void *p;
239 struct memchunk **mpp, *mp;
240
241 size *= items;
242 size = _ALIGN(size, sizeof(struct memchunk));
243 heap_use += size;
244 if (heap_use > heap_max)
245 heap_max = heap_use;
246 for (mpp = &freechunks; (mp = *mpp) != 0; mpp = &mp->next) {
247 if (mp->size == size) {
248 *mpp = mp->next;
249 return mp;
250 }
251 }
252 p = avail_ram;
253 avail_ram += size;
254 if (avail_ram > avail_high)
255 avail_high = avail_ram;
256 if (avail_ram > end_avail) {
257 printf("oops... out of memory\n\r");
258 pause();
259 }
260 return p;
261}
262
263void zfree(void *x, void *addr, unsigned nb)
264{
265 struct memchunk *mp = addr;
266
267 nb = _ALIGN(nb, sizeof(struct memchunk));
268 heap_use -= nb;
269 if (avail_ram == addr + nb) {
270 avail_ram = addr;
271 return;
272 }
273 mp->size = nb;
274 mp->next = freechunks;
275 freechunks = mp;
276}
277
278#define HEAD_CRC 2
279#define EXTRA_FIELD 4
280#define ORIG_NAME 8
281#define COMMENT 0x10
282#define RESERVED 0xe0
283
284#define DEFLATED 8
285
286static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp)
287{
288 z_stream s;
289 int r, i, flags;
290
291 /* skip header */
292 i = 10;
293 flags = src[3];
294 if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
295 printf("bad gzipped data\n\r");
296 exit();
297 }
298 if ((flags & EXTRA_FIELD) != 0)
299 i = 12 + src[10] + (src[11] << 8);
300 if ((flags & ORIG_NAME) != 0)
301 while (src[i++] != 0)
302 ;
303 if ((flags & COMMENT) != 0)
304 while (src[i++] != 0)
305 ;
306 if ((flags & HEAD_CRC) != 0)
307 i += 2;
308 if (i >= *lenp) {
309 printf("gunzip: ran out of data in header\n\r");
310 exit();
311 }
312
313 s.zalloc = zalloc;
314 s.zfree = zfree;
315 r = inflateInit2(&s, -MAX_WBITS);
316 if (r != Z_OK) {
317 printf("inflateInit2 returned %d\n\r", r);
318 exit();
319 }
320 s.next_in = src + i;
321 s.avail_in = *lenp - i;
322 s.next_out = dst;
323 s.avail_out = dstlen;
324 r = inflate(&s, Z_FINISH);
325 if (r != Z_OK && r != Z_STREAM_END) {
326 printf("inflate returned %d msg: %s\n\r", r, s.msg);
327 exit();
328 }
329 *lenp = s.next_out - (unsigned char *) dst;
330 inflateEnd(&s);
331}
332
diff --git a/arch/ppc64/boot/string.S b/arch/ppc64/boot/string.S
index 7ade87ae7718..b1eeaed7db17 100644
--- a/arch/ppc64/boot/string.S
+++ b/arch/ppc64/boot/string.S
@@ -104,7 +104,7 @@ memmove:
104 104
105 .globl memcpy 105 .globl memcpy
106memcpy: 106memcpy:
107 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ 107 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
108 addi r6,r3,-4 108 addi r6,r3,-4
109 addi r4,r4,-4 109 addi r4,r4,-4
110 beq 2f /* if less than 8 bytes to do */ 110 beq 2f /* if less than 8 bytes to do */
@@ -146,7 +146,7 @@ memcpy:
146 146
147 .globl backwards_memcpy 147 .globl backwards_memcpy
148backwards_memcpy: 148backwards_memcpy:
149 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ 149 rlwinm. r7,r5,32-3,3,31 /* r7 = r5 >> 3 */
150 add r6,r3,r5 150 add r6,r3,r5
151 add r4,r4,r5 151 add r4,r4,r5
152 beq 2f 152 beq 2f
diff --git a/arch/ppc64/boot/string.h b/arch/ppc64/boot/string.h
index 9289258bcbd6..9fdff1cc0d70 100644
--- a/arch/ppc64/boot/string.h
+++ b/arch/ppc64/boot/string.h
@@ -1,5 +1,6 @@
1#ifndef _PPC_BOOT_STRING_H_ 1#ifndef _PPC_BOOT_STRING_H_
2#define _PPC_BOOT_STRING_H_ 2#define _PPC_BOOT_STRING_H_
3#include <stddef.h>
3 4
4extern char *strcpy(char *dest, const char *src); 5extern char *strcpy(char *dest, const char *src);
5extern char *strncpy(char *dest, const char *src, size_t n); 6extern char *strncpy(char *dest, const char *src, size_t n);
diff --git a/arch/ppc64/boot/zImage.lds b/arch/ppc64/boot/zImage.lds
index 8fe5e7071f54..4b6bb3ffe3dc 100644
--- a/arch/ppc64/boot/zImage.lds
+++ b/arch/ppc64/boot/zImage.lds
@@ -1,62 +1,24 @@
1OUTPUT_ARCH(powerpc:common) 1OUTPUT_ARCH(powerpc:common)
2SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/local/powerpc-any-elf/lib); 2ENTRY(_zimage_start)
3/* Do we need any of these for elf?
4 __DYNAMIC = 0; */
5SECTIONS 3SECTIONS
6{ 4{
7 /* Read-only sections, merged into text segment: */ 5 . = (4*1024*1024);
8 . = + SIZEOF_HEADERS; 6 _start = .;
9 .interp : { *(.interp) }
10 .hash : { *(.hash) }
11 .dynsym : { *(.dynsym) }
12 .dynstr : { *(.dynstr) }
13 .rel.text : { *(.rel.text) }
14 .rela.text : { *(.rela.text) }
15 .rel.data : { *(.rel.data) }
16 .rela.data : { *(.rela.data) }
17 .rel.rodata : { *(.rel.rodata) }
18 .rela.rodata : { *(.rela.rodata) }
19 .rel.got : { *(.rel.got) }
20 .rela.got : { *(.rela.got) }
21 .rel.ctors : { *(.rel.ctors) }
22 .rela.ctors : { *(.rela.ctors) }
23 .rel.dtors : { *(.rel.dtors) }
24 .rela.dtors : { *(.rela.dtors) }
25 .rel.bss : { *(.rel.bss) }
26 .rela.bss : { *(.rela.bss) }
27 .rel.plt : { *(.rel.plt) }
28 .rela.plt : { *(.rela.plt) }
29 .plt : { *(.plt) }
30 .text : 7 .text :
31 { 8 {
32 *(.text) 9 *(.text)
33 *(.fixup) 10 *(.fixup)
34 *(.got1)
35 } 11 }
36 . = ALIGN(4096);
37 _etext = .; 12 _etext = .;
38 PROVIDE (etext = .);
39 .rodata :
40 {
41 *(.rodata)
42 *(.rodata1)
43 }
44 .kstrtab : { *(.kstrtab) }
45 __vermagic : { *(__vermagic) }
46 .fini : { *(.fini) } =0
47 .ctors : { *(.ctors) }
48 .dtors : { *(.dtors) }
49 /* Read-write section, merged into data segment: */
50 . = ALIGN(4096); 13 . = ALIGN(4096);
51 .data : 14 .data :
52 { 15 {
53 *(.data) 16 *(.rodata*)
54 *(.data1) 17 *(.data*)
55 *(.sdata) 18 *(.sdata*)
56 *(.sdata2) 19 __got2_start = .;
57 *(.got.plt) *(.got) 20 *(.got2)
58 *(.dynamic) 21 __got2_end = .;
59 CONSTRUCTORS
60 } 22 }
61 23
62 . = ALIGN(4096); 24 . = ALIGN(4096);
@@ -71,20 +33,14 @@ SECTIONS
71 33
72 . = ALIGN(4096); 34 . = ALIGN(4096);
73 _edata = .; 35 _edata = .;
74 PROVIDE (edata = .);
75
76 .fixup : { *(.fixup) }
77 36
78 . = ALIGN(4096); 37 . = ALIGN(4096);
79 __bss_start = .; 38 __bss_start = .;
80 .bss : 39 .bss :
81 { 40 {
82 *(.sbss) *(.scommon) 41 *(.sbss)
83 *(.dynbss)
84 *(.bss) 42 *(.bss)
85 *(COMMON)
86 } 43 }
87 . = ALIGN(4096); 44 . = ALIGN(4096);
88 _end = . ; 45 _end = . ;
89 PROVIDE (end = .);
90} 46}
diff --git a/arch/ppc64/boot/zlib.c b/arch/ppc64/boot/zlib.c
deleted file mode 100644
index 0d910cd2079d..000000000000
--- a/arch/ppc64/boot/zlib.c
+++ /dev/null
@@ -1,2195 +0,0 @@
1/*
2 * This file is derived from various .h and .c files from the zlib-0.95
3 * distribution by Jean-loup Gailly and Mark Adler, with some additions
4 * by Paul Mackerras to aid in implementing Deflate compression and
5 * decompression for PPP packets. See zlib.h for conditions of
6 * distribution and use.
7 *
8 * Changes that have been made include:
9 * - changed functions not used outside this file to "local"
10 * - added minCompression parameter to deflateInit2
11 * - added Z_PACKET_FLUSH (see zlib.h for details)
12 * - added inflateIncomp
13 *
14 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
15
16 This software is provided 'as-is', without any express or implied
17 warranty. In no event will the authors be held liable for any damages
18 arising from the use of this software.
19
20 Permission is granted to anyone to use this software for any purpose,
21 including commercial applications, and to alter it and redistribute it
22 freely, subject to the following restrictions:
23
24 1. The origin of this software must not be misrepresented; you must not
25 claim that you wrote the original software. If you use this software
26 in a product, an acknowledgment in the product documentation would be
27 appreciated but is not required.
28 2. Altered source versions must be plainly marked as such, and must not be
29 misrepresented as being the original software.
30 3. This notice may not be removed or altered from any source distribution.
31
32 Jean-loup Gailly Mark Adler
33 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
34
35 *
36 *
37 */
38
39/*+++++*/
40/* zutil.h -- internal interface and configuration of the compression library
41 * Copyright (C) 1995 Jean-loup Gailly.
42 * For conditions of distribution and use, see copyright notice in zlib.h
43 */
44
45/* WARNING: this file should *not* be used by applications. It is
46 part of the implementation of the compression library and is
47 subject to change. Applications should only use zlib.h.
48 */
49
50/* From: zutil.h,v 1.9 1995/05/03 17:27:12 jloup Exp */
51
52#define _Z_UTIL_H
53
54#include "zlib.h"
55
56#ifndef local
57# define local static
58#endif
59/* compile with -Dlocal if your debugger can't find static symbols */
60
61#define FAR
62
63typedef unsigned char uch;
64typedef uch FAR uchf;
65typedef unsigned short ush;
66typedef ush FAR ushf;
67typedef unsigned long ulg;
68
69extern char *z_errmsg[]; /* indexed by 1-zlib_error */
70
71#define ERR_RETURN(strm,err) return (strm->msg=z_errmsg[1-err], err)
72/* To be used only when the state is known to be valid */
73
74#ifndef NULL
75#define NULL ((void *) 0)
76#endif
77
78 /* common constants */
79
80#define DEFLATED 8
81
82#ifndef DEF_WBITS
83# define DEF_WBITS MAX_WBITS
84#endif
85/* default windowBits for decompression. MAX_WBITS is for compression only */
86
87#if MAX_MEM_LEVEL >= 8
88# define DEF_MEM_LEVEL 8
89#else
90# define DEF_MEM_LEVEL MAX_MEM_LEVEL
91#endif
92/* default memLevel */
93
94#define STORED_BLOCK 0
95#define STATIC_TREES 1
96#define DYN_TREES 2
97/* The three kinds of block type */
98
99#define MIN_MATCH 3
100#define MAX_MATCH 258
101/* The minimum and maximum match lengths */
102
103 /* functions */
104
105extern void *memcpy(void *, const void *, unsigned long);
106#define zmemcpy memcpy
107
108/* Diagnostic functions */
109#ifdef DEBUG_ZLIB
110# include "stdio.h"
111# ifndef verbose
112# define verbose 0
113# endif
114# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
115# define Trace(x) fprintf x
116# define Tracev(x) {if (verbose) fprintf x ;}
117# define Tracevv(x) {if (verbose>1) fprintf x ;}
118# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
119# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
120#else
121# define Assert(cond,msg)
122# define Trace(x)
123# define Tracev(x)
124# define Tracevv(x)
125# define Tracec(c,x)
126# define Tracecv(c,x)
127#endif
128
129
130typedef uLong (*check_func) OF((uLong check, Bytef *buf, uInt len));
131
132/* voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); */
133/* void zcfree OF((voidpf opaque, voidpf ptr)); */
134
135#define ZALLOC(strm, items, size) \
136 (*((strm)->zalloc))((strm)->opaque, (items), (size))
137#define ZFREE(strm, addr, size) \
138 (*((strm)->zfree))((strm)->opaque, (voidpf)(addr), (size))
139#define TRY_FREE(s, p, n) {if (p) ZFREE(s, p, n);}
140
141/* deflate.h -- internal compression state
142 * Copyright (C) 1995 Jean-loup Gailly
143 * For conditions of distribution and use, see copyright notice in zlib.h
144 */
145
146/* WARNING: this file should *not* be used by applications. It is
147 part of the implementation of the compression library and is
148 subject to change. Applications should only use zlib.h.
149 */
150
151/*+++++*/
152/* infblock.h -- header to use infblock.c
153 * Copyright (C) 1995 Mark Adler
154 * For conditions of distribution and use, see copyright notice in zlib.h
155 */
156
157/* WARNING: this file should *not* be used by applications. It is
158 part of the implementation of the compression library and is
159 subject to change. Applications should only use zlib.h.
160 */
161
162struct inflate_blocks_state;
163typedef struct inflate_blocks_state FAR inflate_blocks_statef;
164
165local inflate_blocks_statef * inflate_blocks_new OF((
166 z_stream *z,
167 check_func c, /* check function */
168 uInt w)); /* window size */
169
170local int inflate_blocks OF((
171 inflate_blocks_statef *,
172 z_stream *,
173 int)); /* initial return code */
174
175local void inflate_blocks_reset OF((
176 inflate_blocks_statef *,
177 z_stream *,
178 uLongf *)); /* check value on output */
179
180local int inflate_blocks_free OF((
181 inflate_blocks_statef *,
182 z_stream *,
183 uLongf *)); /* check value on output */
184
185local int inflate_addhistory OF((
186 inflate_blocks_statef *,
187 z_stream *));
188
189local int inflate_packet_flush OF((
190 inflate_blocks_statef *));
191
192/*+++++*/
193/* inftrees.h -- header to use inftrees.c
194 * Copyright (C) 1995 Mark Adler
195 * For conditions of distribution and use, see copyright notice in zlib.h
196 */
197
198/* WARNING: this file should *not* be used by applications. It is
199 part of the implementation of the compression library and is
200 subject to change. Applications should only use zlib.h.
201 */
202
203/* Huffman code lookup table entry--this entry is four bytes for machines
204 that have 16-bit pointers (e.g. PC's in the small or medium model). */
205
206typedef struct inflate_huft_s FAR inflate_huft;
207
208struct inflate_huft_s {
209 union {
210 struct {
211 Byte Exop; /* number of extra bits or operation */
212 Byte Bits; /* number of bits in this code or subcode */
213 } what;
214 uInt Nalloc; /* number of these allocated here */
215 Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
216 } word; /* 16-bit, 8 bytes for 32-bit machines) */
217 union {
218 uInt Base; /* literal, length base, or distance base */
219 inflate_huft *Next; /* pointer to next level of table */
220 } more;
221};
222
223#ifdef DEBUG_ZLIB
224 local uInt inflate_hufts;
225#endif
226
227local int inflate_trees_bits OF((
228 uIntf *, /* 19 code lengths */
229 uIntf *, /* bits tree desired/actual depth */
230 inflate_huft * FAR *, /* bits tree result */
231 z_stream *)); /* for zalloc, zfree functions */
232
233local int inflate_trees_dynamic OF((
234 uInt, /* number of literal/length codes */
235 uInt, /* number of distance codes */
236 uIntf *, /* that many (total) code lengths */
237 uIntf *, /* literal desired/actual bit depth */
238 uIntf *, /* distance desired/actual bit depth */
239 inflate_huft * FAR *, /* literal/length tree result */
240 inflate_huft * FAR *, /* distance tree result */
241 z_stream *)); /* for zalloc, zfree functions */
242
243local int inflate_trees_fixed OF((
244 uIntf *, /* literal desired/actual bit depth */
245 uIntf *, /* distance desired/actual bit depth */
246 inflate_huft * FAR *, /* literal/length tree result */
247 inflate_huft * FAR *)); /* distance tree result */
248
249local int inflate_trees_free OF((
250 inflate_huft *, /* tables to free */
251 z_stream *)); /* for zfree function */
252
253
254/*+++++*/
255/* infcodes.h -- header to use infcodes.c
256 * Copyright (C) 1995 Mark Adler
257 * For conditions of distribution and use, see copyright notice in zlib.h
258 */
259
260/* WARNING: this file should *not* be used by applications. It is
261 part of the implementation of the compression library and is
262 subject to change. Applications should only use zlib.h.
263 */
264
265struct inflate_codes_state;
266typedef struct inflate_codes_state FAR inflate_codes_statef;
267
268local inflate_codes_statef *inflate_codes_new OF((
269 uInt, uInt,
270 inflate_huft *, inflate_huft *,
271 z_stream *));
272
273local int inflate_codes OF((
274 inflate_blocks_statef *,
275 z_stream *,
276 int));
277
278local void inflate_codes_free OF((
279 inflate_codes_statef *,
280 z_stream *));
281
282
283/*+++++*/
284/* inflate.c -- zlib interface to inflate modules
285 * Copyright (C) 1995 Mark Adler
286 * For conditions of distribution and use, see copyright notice in zlib.h
287 */
288
289/* inflate private state */
290struct internal_state {
291
292 /* mode */
293 enum {
294 METHOD, /* waiting for method byte */
295 FLAG, /* waiting for flag byte */
296 BLOCKS, /* decompressing blocks */
297 CHECK4, /* four check bytes to go */
298 CHECK3, /* three check bytes to go */
299 CHECK2, /* two check bytes to go */
300 CHECK1, /* one check byte to go */
301 DONE, /* finished check, done */
302 BAD} /* got an error--stay here */
303 mode; /* current inflate mode */
304
305 /* mode dependent information */
306 union {
307 uInt method; /* if FLAGS, method byte */
308 struct {
309 uLong was; /* computed check value */
310 uLong need; /* stream check value */
311 } check; /* if CHECK, check values to compare */
312 uInt marker; /* if BAD, inflateSync's marker bytes count */
313 } sub; /* submode */
314
315 /* mode independent information */
316 int nowrap; /* flag for no wrapper */
317 uInt wbits; /* log2(window size) (8..15, defaults to 15) */
318 inflate_blocks_statef
319 *blocks; /* current inflate_blocks state */
320
321};
322
323
324int inflateReset(
325 z_stream *z
326)
327{
328 uLong c;
329
330 if (z == Z_NULL || z->state == Z_NULL)
331 return Z_STREAM_ERROR;
332 z->total_in = z->total_out = 0;
333 z->msg = Z_NULL;
334 z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
335 inflate_blocks_reset(z->state->blocks, z, &c);
336 Trace((stderr, "inflate: reset\n"));
337 return Z_OK;
338}
339
340
341int inflateEnd(
342 z_stream *z
343)
344{
345 uLong c;
346
347 if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
348 return Z_STREAM_ERROR;
349 if (z->state->blocks != Z_NULL)
350 inflate_blocks_free(z->state->blocks, z, &c);
351 ZFREE(z, z->state, sizeof(struct internal_state));
352 z->state = Z_NULL;
353 Trace((stderr, "inflate: end\n"));
354 return Z_OK;
355}
356
357
358int inflateInit2(
359 z_stream *z,
360 int w
361)
362{
363 /* initialize state */
364 if (z == Z_NULL)
365 return Z_STREAM_ERROR;
366/* if (z->zalloc == Z_NULL) z->zalloc = zcalloc; */
367/* if (z->zfree == Z_NULL) z->zfree = zcfree; */
368 if ((z->state = (struct internal_state FAR *)
369 ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
370 return Z_MEM_ERROR;
371 z->state->blocks = Z_NULL;
372
373 /* handle undocumented nowrap option (no zlib header or check) */
374 z->state->nowrap = 0;
375 if (w < 0)
376 {
377 w = - w;
378 z->state->nowrap = 1;
379 }
380
381 /* set window size */
382 if (w < 8 || w > 15)
383 {
384 inflateEnd(z);
385 return Z_STREAM_ERROR;
386 }
387 z->state->wbits = (uInt)w;
388
389 /* create inflate_blocks state */
390 if ((z->state->blocks =
391 inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, 1 << w))
392 == Z_NULL)
393 {
394 inflateEnd(z);
395 return Z_MEM_ERROR;
396 }
397 Trace((stderr, "inflate: allocated\n"));
398
399 /* reset state */
400 inflateReset(z);
401 return Z_OK;
402}
403
404
405int inflateInit(
406 z_stream *z
407)
408{
409 return inflateInit2(z, DEF_WBITS);
410}
411
412
413#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
414#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
415
416int inflate(
417 z_stream *z,
418 int f
419)
420{
421 int r;
422 uInt b;
423
424 if (z == Z_NULL || z->next_in == Z_NULL)
425 return Z_STREAM_ERROR;
426 r = Z_BUF_ERROR;
427 while (1) switch (z->state->mode)
428 {
429 case METHOD:
430 NEEDBYTE
431 if (((z->state->sub.method = NEXTBYTE) & 0xf) != DEFLATED)
432 {
433 z->state->mode = BAD;
434 z->msg = "unknown compression method";
435 z->state->sub.marker = 5; /* can't try inflateSync */
436 break;
437 }
438 if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
439 {
440 z->state->mode = BAD;
441 z->msg = "invalid window size";
442 z->state->sub.marker = 5; /* can't try inflateSync */
443 break;
444 }
445 z->state->mode = FLAG;
446 case FLAG:
447 NEEDBYTE
448 if ((b = NEXTBYTE) & 0x20)
449 {
450 z->state->mode = BAD;
451 z->msg = "invalid reserved bit";
452 z->state->sub.marker = 5; /* can't try inflateSync */
453 break;
454 }
455 if (((z->state->sub.method << 8) + b) % 31)
456 {
457 z->state->mode = BAD;
458 z->msg = "incorrect header check";
459 z->state->sub.marker = 5; /* can't try inflateSync */
460 break;
461 }
462 Trace((stderr, "inflate: zlib header ok\n"));
463 z->state->mode = BLOCKS;
464 case BLOCKS:
465 r = inflate_blocks(z->state->blocks, z, r);
466 if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
467 r = inflate_packet_flush(z->state->blocks);
468 if (r == Z_DATA_ERROR)
469 {
470 z->state->mode = BAD;
471 z->state->sub.marker = 0; /* can try inflateSync */
472 break;
473 }
474 if (r != Z_STREAM_END)
475 return r;
476 r = Z_OK;
477 inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
478 if (z->state->nowrap)
479 {
480 z->state->mode = DONE;
481 break;
482 }
483 z->state->mode = CHECK4;
484 case CHECK4:
485 NEEDBYTE
486 z->state->sub.check.need = (uLong)NEXTBYTE << 24;
487 z->state->mode = CHECK3;
488 case CHECK3:
489 NEEDBYTE
490 z->state->sub.check.need += (uLong)NEXTBYTE << 16;
491 z->state->mode = CHECK2;
492 case CHECK2:
493 NEEDBYTE
494 z->state->sub.check.need += (uLong)NEXTBYTE << 8;
495 z->state->mode = CHECK1;
496 case CHECK1:
497 NEEDBYTE
498 z->state->sub.check.need += (uLong)NEXTBYTE;
499
500 if (z->state->sub.check.was != z->state->sub.check.need)
501 {
502 z->state->mode = BAD;
503 z->msg = "incorrect data check";
504 z->state->sub.marker = 5; /* can't try inflateSync */
505 break;
506 }
507 Trace((stderr, "inflate: zlib check ok\n"));
508 z->state->mode = DONE;
509 case DONE:
510 return Z_STREAM_END;
511 case BAD:
512 return Z_DATA_ERROR;
513 default:
514 return Z_STREAM_ERROR;
515 }
516
517 empty:
518 if (f != Z_PACKET_FLUSH)
519 return r;
520 z->state->mode = BAD;
521 z->state->sub.marker = 0; /* can try inflateSync */
522 return Z_DATA_ERROR;
523}
524
525/*
526 * This subroutine adds the data at next_in/avail_in to the output history
527 * without performing any output. The output buffer must be "caught up";
528 * i.e. no pending output (hence s->read equals s->write), and the state must
529 * be BLOCKS (i.e. we should be willing to see the start of a series of
530 * BLOCKS). On exit, the output will also be caught up, and the checksum
531 * will have been updated if need be.
532 */
533
534int inflateIncomp(
535 z_stream *z
536)
537{
538 if (z->state->mode != BLOCKS)
539 return Z_DATA_ERROR;
540 return inflate_addhistory(z->state->blocks, z);
541}
542
543
544int inflateSync(
545 z_stream *z
546)
547{
548 uInt n; /* number of bytes to look at */
549 Bytef *p; /* pointer to bytes */
550 uInt m; /* number of marker bytes found in a row */
551 uLong r, w; /* temporaries to save total_in and total_out */
552
553 /* set up */
554 if (z == Z_NULL || z->state == Z_NULL)
555 return Z_STREAM_ERROR;
556 if (z->state->mode != BAD)
557 {
558 z->state->mode = BAD;
559 z->state->sub.marker = 0;
560 }
561 if ((n = z->avail_in) == 0)
562 return Z_BUF_ERROR;
563 p = z->next_in;
564 m = z->state->sub.marker;
565
566 /* search */
567 while (n && m < 4)
568 {
569 if (*p == (Byte)(m < 2 ? 0 : 0xff))
570 m++;
571 else if (*p)
572 m = 0;
573 else
574 m = 4 - m;
575 p++, n--;
576 }
577
578 /* restore */
579 z->total_in += p - z->next_in;
580 z->next_in = p;
581 z->avail_in = n;
582 z->state->sub.marker = m;
583
584 /* return no joy or set up to restart on a new block */
585 if (m != 4)
586 return Z_DATA_ERROR;
587 r = z->total_in; w = z->total_out;
588 inflateReset(z);
589 z->total_in = r; z->total_out = w;
590 z->state->mode = BLOCKS;
591 return Z_OK;
592}
593
594#undef NEEDBYTE
595#undef NEXTBYTE
596
597/*+++++*/
598/* infutil.h -- types and macros common to blocks and codes
599 * Copyright (C) 1995 Mark Adler
600 * For conditions of distribution and use, see copyright notice in zlib.h
601 */
602
603/* WARNING: this file should *not* be used by applications. It is
604 part of the implementation of the compression library and is
605 subject to change. Applications should only use zlib.h.
606 */
607
608/* inflate blocks semi-private state */
609struct inflate_blocks_state {
610
611 /* mode */
612 enum {
613 TYPE, /* get type bits (3, including end bit) */
614 LENS, /* get lengths for stored */
615 STORED, /* processing stored block */
616 TABLE, /* get table lengths */
617 BTREE, /* get bit lengths tree for a dynamic block */
618 DTREE, /* get length, distance trees for a dynamic block */
619 CODES, /* processing fixed or dynamic block */
620 DRY, /* output remaining window bytes */
621 DONEB, /* finished last block, done */
622 BADB} /* got a data error--stuck here */
623 mode; /* current inflate_block mode */
624
625 /* mode dependent information */
626 union {
627 uInt left; /* if STORED, bytes left to copy */
628 struct {
629 uInt table; /* table lengths (14 bits) */
630 uInt index; /* index into blens (or border) */
631 uIntf *blens; /* bit lengths of codes */
632 uInt bb; /* bit length tree depth */
633 inflate_huft *tb; /* bit length decoding tree */
634 int nblens; /* # elements allocated at blens */
635 } trees; /* if DTREE, decoding info for trees */
636 struct {
637 inflate_huft *tl, *td; /* trees to free */
638 inflate_codes_statef
639 *codes;
640 } decode; /* if CODES, current state */
641 } sub; /* submode */
642 uInt last; /* true if this block is the last block */
643
644 /* mode independent information */
645 uInt bitk; /* bits in bit buffer */
646 uLong bitb; /* bit buffer */
647 Bytef *window; /* sliding window */
648 Bytef *end; /* one byte after sliding window */
649 Bytef *read; /* window read pointer */
650 Bytef *write; /* window write pointer */
651 check_func checkfn; /* check function */
652 uLong check; /* check on output */
653
654};
655
656
657/* defines for inflate input/output */
658/* update pointers and return */
659#define UPDBITS {s->bitb=b;s->bitk=k;}
660#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
661#define UPDOUT {s->write=q;}
662#define UPDATE {UPDBITS UPDIN UPDOUT}
663#define LEAVE {UPDATE return inflate_flush(s,z,r);}
664/* get bytes and bits */
665#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
666#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
667#define NEXTBYTE (n--,*p++)
668#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
669#define DUMPBITS(j) {b>>=(j);k-=(j);}
670/* output bytes */
671#define WAVAIL (q<s->read?s->read-q-1:s->end-q)
672#define LOADOUT {q=s->write;m=WAVAIL;}
673#define WRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=WAVAIL;}}
674#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
675#define NEEDOUT {if(m==0){WRAP if(m==0){FLUSH WRAP if(m==0) LEAVE}}r=Z_OK;}
676#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
677/* load local pointers */
678#define LOAD {LOADIN LOADOUT}
679
680/* And'ing with mask[n] masks the lower n bits */
681local uInt inflate_mask[] = {
682 0x0000,
683 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
684 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
685};
686
687/* copy as much as possible from the sliding window to the output area */
688local int inflate_flush OF((
689 inflate_blocks_statef *,
690 z_stream *,
691 int));
692
693/*+++++*/
694/* inffast.h -- header to use inffast.c
695 * Copyright (C) 1995 Mark Adler
696 * For conditions of distribution and use, see copyright notice in zlib.h
697 */
698
699/* WARNING: this file should *not* be used by applications. It is
700 part of the implementation of the compression library and is
701 subject to change. Applications should only use zlib.h.
702 */
703
704local int inflate_fast OF((
705 uInt,
706 uInt,
707 inflate_huft *,
708 inflate_huft *,
709 inflate_blocks_statef *,
710 z_stream *));
711
712
713/*+++++*/
714/* infblock.c -- interpret and process block types to last block
715 * Copyright (C) 1995 Mark Adler
716 * For conditions of distribution and use, see copyright notice in zlib.h
717 */
718
719/* Table for deflate from PKZIP's appnote.txt. */
720local uInt border[] = { /* Order of the bit length code lengths */
721 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
722
723/*
724 Notes beyond the 1.93a appnote.txt:
725
726 1. Distance pointers never point before the beginning of the output
727 stream.
728 2. Distance pointers can point back across blocks, up to 32k away.
729 3. There is an implied maximum of 7 bits for the bit length table and
730 15 bits for the actual data.
731 4. If only one code exists, then it is encoded using one bit. (Zero
732 would be more efficient, but perhaps a little confusing.) If two
733 codes exist, they are coded using one bit each (0 and 1).
734 5. There is no way of sending zero distance codes--a dummy must be
735 sent if there are none. (History: a pre 2.0 version of PKZIP would
736 store blocks with no distance codes, but this was discovered to be
737 too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
738 zero distance codes, which is sent as one code of zero bits in
739 length.
740 6. There are up to 286 literal/length codes. Code 256 represents the
741 end-of-block. Note however that the static length tree defines
742 288 codes just to fill out the Huffman codes. Codes 286 and 287
743 cannot be used though, since there is no length base or extra bits
744 defined for them. Similarily, there are up to 30 distance codes.
745 However, static trees define 32 codes (all 5 bits) to fill out the
746 Huffman codes, but the last two had better not show up in the data.
747 7. Unzip can check dynamic Huffman blocks for complete code sets.
748 The exception is that a single code would not be complete (see #4).
749 8. The five bits following the block type is really the number of
750 literal codes sent minus 257.
751 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
752 (1+6+6). Therefore, to output three times the length, you output
753 three codes (1+1+1), whereas to output four times the same length,
754 you only need two codes (1+3). Hmm.
755 10. In the tree reconstruction algorithm, Code = Code + Increment
756 only if BitLength(i) is not zero. (Pretty obvious.)
757 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
758 12. Note: length code 284 can represent 227-258, but length code 285
759 really is 258. The last length deserves its own, short code
760 since it gets used a lot in very redundant files. The length
761 258 is special since 258 - 3 (the min match length) is 255.
762 13. The literal/length and distance code bit lengths are read as a
763 single stream of lengths. It is possible (and advantageous) for
764 a repeat code (16, 17, or 18) to go across the boundary between
765 the two sets of lengths.
766 */
767
768
769local void inflate_blocks_reset(
770 inflate_blocks_statef *s,
771 z_stream *z,
772 uLongf *c
773)
774{
775 if (s->checkfn != Z_NULL)
776 *c = s->check;
777 if (s->mode == BTREE || s->mode == DTREE)
778 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
779 if (s->mode == CODES)
780 {
781 inflate_codes_free(s->sub.decode.codes, z);
782 inflate_trees_free(s->sub.decode.td, z);
783 inflate_trees_free(s->sub.decode.tl, z);
784 }
785 s->mode = TYPE;
786 s->bitk = 0;
787 s->bitb = 0;
788 s->read = s->write = s->window;
789 if (s->checkfn != Z_NULL)
790 s->check = (*s->checkfn)(0L, Z_NULL, 0);
791 Trace((stderr, "inflate: blocks reset\n"));
792}
793
794
795local inflate_blocks_statef *inflate_blocks_new(
796 z_stream *z,
797 check_func c,
798 uInt w
799)
800{
801 inflate_blocks_statef *s;
802
803 if ((s = (inflate_blocks_statef *)ZALLOC
804 (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
805 return s;
806 if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
807 {
808 ZFREE(z, s, sizeof(struct inflate_blocks_state));
809 return Z_NULL;
810 }
811 s->end = s->window + w;
812 s->checkfn = c;
813 s->mode = TYPE;
814 Trace((stderr, "inflate: blocks allocated\n"));
815 inflate_blocks_reset(s, z, &s->check);
816 return s;
817}
818
819
820local int inflate_blocks(
821 inflate_blocks_statef *s,
822 z_stream *z,
823 int r
824)
825{
826 uInt t; /* temporary storage */
827 uLong b; /* bit buffer */
828 uInt k; /* bits in bit buffer */
829 Bytef *p; /* input data pointer */
830 uInt n; /* bytes available there */
831 Bytef *q; /* output window write pointer */
832 uInt m; /* bytes to end of window or read pointer */
833
834 /* copy input/output information to locals (UPDATE macro restores) */
835 LOAD
836
837 /* process input based on current state */
838 while (1) switch (s->mode)
839 {
840 case TYPE:
841 NEEDBITS(3)
842 t = (uInt)b & 7;
843 s->last = t & 1;
844 switch (t >> 1)
845 {
846 case 0: /* stored */
847 Trace((stderr, "inflate: stored block%s\n",
848 s->last ? " (last)" : ""));
849 DUMPBITS(3)
850 t = k & 7; /* go to byte boundary */
851 DUMPBITS(t)
852 s->mode = LENS; /* get length of stored block */
853 break;
854 case 1: /* fixed */
855 Trace((stderr, "inflate: fixed codes block%s\n",
856 s->last ? " (last)" : ""));
857 {
858 uInt bl, bd;
859 inflate_huft *tl, *td;
860
861 inflate_trees_fixed(&bl, &bd, &tl, &td);
862 s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
863 if (s->sub.decode.codes == Z_NULL)
864 {
865 r = Z_MEM_ERROR;
866 LEAVE
867 }
868 s->sub.decode.tl = Z_NULL; /* don't try to free these */
869 s->sub.decode.td = Z_NULL;
870 }
871 DUMPBITS(3)
872 s->mode = CODES;
873 break;
874 case 2: /* dynamic */
875 Trace((stderr, "inflate: dynamic codes block%s\n",
876 s->last ? " (last)" : ""));
877 DUMPBITS(3)
878 s->mode = TABLE;
879 break;
880 case 3: /* illegal */
881 DUMPBITS(3)
882 s->mode = BADB;
883 z->msg = "invalid block type";
884 r = Z_DATA_ERROR;
885 LEAVE
886 }
887 break;
888 case LENS:
889 NEEDBITS(32)
890 if (((~b) >> 16) != (b & 0xffff))
891 {
892 s->mode = BADB;
893 z->msg = "invalid stored block lengths";
894 r = Z_DATA_ERROR;
895 LEAVE
896 }
897 s->sub.left = (uInt)b & 0xffff;
898 b = k = 0; /* dump bits */
899 Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
900 s->mode = s->sub.left ? STORED : TYPE;
901 break;
902 case STORED:
903 if (n == 0)
904 LEAVE
905 NEEDOUT
906 t = s->sub.left;
907 if (t > n) t = n;
908 if (t > m) t = m;
909 zmemcpy(q, p, t);
910 p += t; n -= t;
911 q += t; m -= t;
912 if ((s->sub.left -= t) != 0)
913 break;
914 Tracev((stderr, "inflate: stored end, %lu total out\n",
915 z->total_out + (q >= s->read ? q - s->read :
916 (s->end - s->read) + (q - s->window))));
917 s->mode = s->last ? DRY : TYPE;
918 break;
919 case TABLE:
920 NEEDBITS(14)
921 s->sub.trees.table = t = (uInt)b & 0x3fff;
922#ifndef PKZIP_BUG_WORKAROUND
923 if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
924 {
925 s->mode = BADB;
926 z->msg = "too many length or distance symbols";
927 r = Z_DATA_ERROR;
928 LEAVE
929 }
930#endif
931 t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
932 if (t < 19)
933 t = 19;
934 if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
935 {
936 r = Z_MEM_ERROR;
937 LEAVE
938 }
939 s->sub.trees.nblens = t;
940 DUMPBITS(14)
941 s->sub.trees.index = 0;
942 Tracev((stderr, "inflate: table sizes ok\n"));
943 s->mode = BTREE;
944 case BTREE:
945 while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
946 {
947 NEEDBITS(3)
948 s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
949 DUMPBITS(3)
950 }
951 while (s->sub.trees.index < 19)
952 s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
953 s->sub.trees.bb = 7;
954 t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
955 &s->sub.trees.tb, z);
956 if (t != Z_OK)
957 {
958 r = t;
959 if (r == Z_DATA_ERROR)
960 s->mode = BADB;
961 LEAVE
962 }
963 s->sub.trees.index = 0;
964 Tracev((stderr, "inflate: bits tree ok\n"));
965 s->mode = DTREE;
966 case DTREE:
967 while (t = s->sub.trees.table,
968 s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
969 {
970 inflate_huft *h;
971 uInt i, j, c;
972
973 t = s->sub.trees.bb;
974 NEEDBITS(t)
975 h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
976 t = h->word.what.Bits;
977 c = h->more.Base;
978 if (c < 16)
979 {
980 DUMPBITS(t)
981 s->sub.trees.blens[s->sub.trees.index++] = c;
982 }
983 else /* c == 16..18 */
984 {
985 i = c == 18 ? 7 : c - 14;
986 j = c == 18 ? 11 : 3;
987 NEEDBITS(t + i)
988 DUMPBITS(t)
989 j += (uInt)b & inflate_mask[i];
990 DUMPBITS(i)
991 i = s->sub.trees.index;
992 t = s->sub.trees.table;
993 if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
994 (c == 16 && i < 1))
995 {
996 s->mode = BADB;
997 z->msg = "invalid bit length repeat";
998 r = Z_DATA_ERROR;
999 LEAVE
1000 }
1001 c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
1002 do {
1003 s->sub.trees.blens[i++] = c;
1004 } while (--j);
1005 s->sub.trees.index = i;
1006 }
1007 }
1008 inflate_trees_free(s->sub.trees.tb, z);
1009 s->sub.trees.tb = Z_NULL;
1010 {
1011 uInt bl, bd;
1012 inflate_huft *tl, *td;
1013 inflate_codes_statef *c;
1014
1015 bl = 9; /* must be <= 9 for lookahead assumptions */
1016 bd = 6; /* must be <= 9 for lookahead assumptions */
1017 t = s->sub.trees.table;
1018 t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
1019 s->sub.trees.blens, &bl, &bd, &tl, &td, z);
1020 if (t != Z_OK)
1021 {
1022 if (t == (uInt)Z_DATA_ERROR)
1023 s->mode = BADB;
1024 r = t;
1025 LEAVE
1026 }
1027 Tracev((stderr, "inflate: trees ok\n"));
1028 if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
1029 {
1030 inflate_trees_free(td, z);
1031 inflate_trees_free(tl, z);
1032 r = Z_MEM_ERROR;
1033 LEAVE
1034 }
1035 ZFREE(z, s->sub.trees.blens, s->sub.trees.nblens * sizeof(uInt));
1036 s->sub.decode.codes = c;
1037 s->sub.decode.tl = tl;
1038 s->sub.decode.td = td;
1039 }
1040 s->mode = CODES;
1041 case CODES:
1042 UPDATE
1043 if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
1044 return inflate_flush(s, z, r);
1045 r = Z_OK;
1046 inflate_codes_free(s->sub.decode.codes, z);
1047 inflate_trees_free(s->sub.decode.td, z);
1048 inflate_trees_free(s->sub.decode.tl, z);
1049 LOAD
1050 Tracev((stderr, "inflate: codes end, %lu total out\n",
1051 z->total_out + (q >= s->read ? q - s->read :
1052 (s->end - s->read) + (q - s->window))));
1053 if (!s->last)
1054 {
1055 s->mode = TYPE;
1056 break;
1057 }
1058 if (k > 7) /* return unused byte, if any */
1059 {
1060 Assert(k < 16, "inflate_codes grabbed too many bytes")
1061 k -= 8;
1062 n++;
1063 p--; /* can always return one */
1064 }
1065 s->mode = DRY;
1066 case DRY:
1067 FLUSH
1068 if (s->read != s->write)
1069 LEAVE
1070 s->mode = DONEB;
1071 case DONEB:
1072 r = Z_STREAM_END;
1073 LEAVE
1074 case BADB:
1075 r = Z_DATA_ERROR;
1076 LEAVE
1077 default:
1078 r = Z_STREAM_ERROR;
1079 LEAVE
1080 }
1081}
1082
1083
/* Release all storage held by an inflate blocks state: first reset the
   state (returning the check value through c), then free the sliding
   window, then the state structure itself.  Always returns Z_OK. */
local int inflate_blocks_free(
	inflate_blocks_statef *s,
	z_stream *z,
	uLongf *c            /* out: receives the current check value */
)
{
  inflate_blocks_reset(s, z, c);
  /* window must be freed before s: its size is computed from s->end/s->window */
  ZFREE(z, s->window, s->end - s->window);
  ZFREE(z, s, sizeof(struct inflate_blocks_state));
  Trace((stderr, "inflate: blocks freed\n"));
  return Z_OK;
}
1096
1097/*
1098 * This subroutine adds the data at next_in/avail_in to the output history
1099 * without performing any output. The output buffer must be "caught up";
1100 * i.e. no pending output (hence s->read equals s->write), and the state must
1101 * be BLOCKS (i.e. we should be willing to see the start of a series of
1102 * BLOCKS). On exit, the output will also be caught up, and the checksum
1103 * will have been updated if need be.
1104 */
/* Add the data at next_in/avail_in to the output history without producing
   any output (see the block comment above).  Requires the output to be
   caught up (s->read == s->write) and the state to be TYPE. */
local int inflate_addhistory(
	inflate_blocks_statef *s,
	z_stream *z
)
{
    /* b and k are unused here but must exist: the LOAD/UPDATE macros
       reference all of b, k, p, n, q, m by name. */
    uLong b;              /* bit buffer */  /* NOT USED HERE */
    uInt k;               /* bits in bit buffer */ /* NOT USED HERE */
    uInt t;               /* temporary storage */
    Bytef *p;             /* input data pointer */
    uInt n;               /* bytes available there */
    Bytef *q;             /* output window write pointer */
    uInt m;               /* bytes to end of window or read pointer */

    if (s->read != s->write)
	return Z_STREAM_ERROR;          /* pending output: caller error */
    if (s->mode != TYPE)
	return Z_DATA_ERROR;            /* must be between blocks */

    /* we're ready to rock */
    LOAD
    /* while there is input ready, copy to output buffer, moving
     * pointers as needed.
     */
    while (n) {
	t = n;  /* how many to do */
	/* is there room until end of buffer? */
	if (t > m) t = m;
	/* update check information */
	if (s->checkfn != Z_NULL)
	    s->check = (*s->checkfn)(s->check, q, t);
	zmemcpy(q, p, t);
	q += t;
	p += t;
	n -= t;
	z->total_out += t;
	s->read = q;    /* drag read pointer forward */
/*      WRAP  */ 	/* expand WRAP macro by hand to handle s->read */
	if (q == s->end) {
	    s->read = q = s->window;
	    m = WAVAIL;
	}
    }
    UPDATE
    return Z_OK;
}
1150
1151
1152/*
1153 * At the end of a Deflate-compressed PPP packet, we expect to have seen
1154 * a `stored' block type value but not the (zero) length bytes.
1155 */
1156local int inflate_packet_flush(
1157 inflate_blocks_statef *s
1158)
1159{
1160 if (s->mode != LENS)
1161 return Z_DATA_ERROR;
1162 s->mode = TYPE;
1163 return Z_OK;
1164}
1165
1166
1167/*+++++*/
1168/* inftrees.c -- generate Huffman trees for efficient decoding
1169 * Copyright (C) 1995 Mark Adler
1170 * For conditions of distribution and use, see copyright notice in zlib.h
1171 */
1172
1173/* simplify the use of the inflate_huft type with some defines */
1174#define base more.Base
1175#define next more.Next
1176#define exop word.what.Exop
1177#define bits word.what.Bits
1178
1179
1180local int huft_build OF((
1181 uIntf *, /* code lengths in bits */
1182 uInt, /* number of codes */
1183 uInt, /* number of "simple" codes */
1184 uIntf *, /* list of base values for non-simple codes */
1185 uIntf *, /* list of extra bits for non-simple codes */
1186 inflate_huft * FAR*,/* result: starting table */
1187 uIntf *, /* maximum lookup bits (returns actual) */
1188 z_stream *)); /* for zalloc function */
1189
1190local voidpf falloc OF((
1191 voidpf, /* opaque pointer (not used) */
1192 uInt, /* number of items */
1193 uInt)); /* size of item */
1194
1195local void ffree OF((
1196 voidpf q, /* opaque pointer (not used) */
1197 voidpf p, /* what to free (not used) */
1198 uInt n)); /* number of bytes (not used) */
1199
/* Tables for deflate from PKZIP's appnote.txt. */
/* NOTE(review): cplens/cplext carry two trailing pad entries (the 0,0 and
   the 192,192 marked invalid below) so that the 31 possible length-code
   indices can be looked up without bounds checks. */
local uInt cplens[] = { /* Copy lengths for literal codes 257..285 */
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
        /* actually lengths - 2; also see note #13 above about 258 */
local uInt cplext[] = { /* Extra bits for literal codes 257..285 */
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 192, 192}; /* 192==invalid */
local uInt cpdist[] = { /* Copy offsets for distance codes 0..29 */
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
        8193, 12289, 16385, 24577};
local uInt cpdext[] = { /* Extra bits for distance codes (one per cpdist entry) */
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
        12, 12, 13, 13};
1216
1217/*
1218 Huffman code decoding is performed using a multi-level table lookup.
1219 The fastest way to decode is to simply build a lookup table whose
1220 size is determined by the longest code. However, the time it takes
1221 to build this table can also be a factor if the data being decoded
1222 is not very long. The most common codes are necessarily the
1223 shortest codes, so those codes dominate the decoding time, and hence
1224 the speed. The idea is you can have a shorter table that decodes the
1225 shorter, more probable codes, and then point to subsidiary tables for
1226 the longer codes. The time it costs to decode the longer codes is
1227 then traded against the time it takes to make longer tables.
1228
1229 This results of this trade are in the variables lbits and dbits
1230 below. lbits is the number of bits the first level table for literal/
1231 length codes can decode in one step, and dbits is the same thing for
1232 the distance codes. Subsequent tables are also less than or equal to
1233 those sizes. These values may be adjusted either when all of the
1234 codes are shorter than that, in which case the longest code length in
1235 bits is used, or when the shortest code is *longer* than the requested
1236 table size, in which case the length of the shortest code in bits is
1237 used.
1238
1239 There are two different values for the two tables, since they code a
1240 different number of possibilities each. The literal/length table
1241 codes 286 possible values, or in a flat code, a little over eight
1242 bits. The distance table codes 30 possible values, or a little less
1243 than five bits, flat. The optimum values for speed end up being
1244 about one bit more than those, so lbits is 8+1 and dbits is 5+1.
1245 The optimum values may differ though from machine to machine, and
1246 possibly even between compilers. Your mileage may vary.
1247 */
1248
1249
1250/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
1251#define BMAX 15 /* maximum bit length of any code */
1252#define N_MAX 288 /* maximum number of codes in any set */
1253
1254#ifdef DEBUG_ZLIB
1255 uInt inflate_hufts;
1256#endif
1257
local int huft_build(
	uIntf *b,               /* code lengths in bits (all assumed <= BMAX) */
	uInt n,                 /* number of codes (assumed <= N_MAX) */
	uInt s,                 /* number of simple-valued codes (0..s-1) */
	uIntf *d,               /* list of base values for non-simple codes */
	uIntf *e,               /* list of extra bits for non-simple codes */
	inflate_huft * FAR *t,  /* result: starting table */
	uIntf *m,               /* maximum lookup bits, returns actual */
	z_stream *zs            /* for zalloc function */
)
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return Z_OK on success, Z_BUF_ERROR
   if the given code set is incomplete (the tables are still built in this
   case), Z_DATA_ERROR if the input is invalid (all zero length codes or an
   over-subscribed set of lengths), or Z_MEM_ERROR if not enough memory. */
{

  uInt a;                       /* counter for codes of length k */
  uInt c[BMAX+1];               /* bit length count table */
  uInt f;                       /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register uInt i;              /* counter, current code */
  register uInt j;              /* counter */
  register int k;               /* number of bits in current code */
  int l;                        /* bits per table (returned in m) */
  register uIntf *p;            /* pointer into c[], b[], or v[] */
  inflate_huft *q;              /* points to current table */
  struct inflate_huft_s r;      /* table entry for structure assignment */
  inflate_huft *u[BMAX];        /* table stack */
  uInt v[N_MAX];                /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  uInt x[BMAX+1];               /* bit offsets, then code stack */
  uIntf *xp;                    /* pointer into x */
  int y;                        /* number of dummy codes added */
  uInt z;                       /* number of entries in current table */


  /* Generate counts for each bit length */
  p = c;
  /* unrolled clear of the 16-entry count array (see comment on C4) */
#define C0 *p++ = 0;
#define C2 C0 C0 C0 C0
#define C4 C2 C2 C2 C2
  C4                            /* clear c[]--assume BMAX+1 is 16 */
  p = b;  i = n;
  do {
    c[*p++]++;                  /* assume all entries <= BMAX */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (inflate_huft *)Z_NULL;
    *m = 0;
    return Z_DATA_ERROR;
  }


  /* Find minimum and maximum length, bound *m by those */
  l = *m;
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((uInt)l < j)
    l = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((uInt)l > i)
    l = i;
  *m = l;


  /* Adjust last length count to fill out codes, if needed */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return Z_DATA_ERROR;      /* over-subscribed: more codes than bit patterns */
  if ((y -= c[i]) < 0)
    return Z_DATA_ERROR;
  c[i] += y;                    /* y now counts the dummy codes padding the set */


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  p = b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);
  n = x[g];                     /* set n to length of v */


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = -l;                       /* bits decoded == (l * h) */
  u[0] = (inflate_huft *)Z_NULL;        /* just to keep compilers happy */
  q = (inflate_huft *)Z_NULL;   /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l)
      {
        h++;
        w += l;                 /* previous table always l bits */

        /* compute minimum size table less than or equal to l bits */
        z = (z = g - w) > (uInt)l ? l : z;      /* table size upper limit */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          if (j < z)
            while (++j < z)     /* try smaller tables up to z bits */
            {
              if ((f <<= 1) <= *++xp)
                break;          /* enough codes to use up j bits */
              f -= *xp;         /* else deduct codes from patterns */
            }
        }
        z = 1 << j;             /* table entries for j-bit table */

        /* allocate and link in new table */
        if ((q = (inflate_huft *)ZALLOC
             (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
        {
          if (h)
            inflate_trees_free(u[0], zs);       /* free what we built so far */
          return Z_MEM_ERROR;   /* not enough memory */
        }
        q->word.Nalloc = z + 1; /* record size for inflate_trees_free() */
#ifdef DEBUG_ZLIB
        inflate_hufts += z + 1;
#endif
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->next)) = Z_NULL;
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.bits = (Byte)l;     /* bits to dump before this table */
          r.exop = (Byte)j;     /* bits in this table */
          r.next = q;           /* pointer to this table */
          j = i >> (w - l);     /* (get around Turbo C bug) */
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.bits = (Byte)(k - w);
      if (p >= v + n)
        r.exop = 128 + 64;      /* out of values--invalid code */
      else if (*p < s)
      {
        r.exop = (Byte)(*p < 256 ? 0 : 32 + 64);        /* 256 is end-of-block */
        r.base = *p++;          /* simple code is just the value */
      }
      else
      {
        r.exop = (Byte)e[*p - s] + 16 + 64;     /* non-simple--look up in lists */
        r.base = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
      {
        h--;                    /* don't need to update q */
        w -= l;
      }
    }
  }


  /* Return Z_BUF_ERROR if we were given an incomplete table */
  return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
}
1461
1462
1463local int inflate_trees_bits(
1464 uIntf *c, /* 19 code lengths */
1465 uIntf *bb, /* bits tree desired/actual depth */
1466 inflate_huft * FAR *tb, /* bits tree result */
1467 z_stream *z /* for zfree function */
1468)
1469{
1470 int r;
1471
1472 r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
1473 if (r == Z_DATA_ERROR)
1474 z->msg = "oversubscribed dynamic bit lengths tree";
1475 else if (r == Z_BUF_ERROR)
1476 {
1477 inflate_trees_free(*tb, z);
1478 z->msg = "incomplete dynamic bit lengths tree";
1479 r = Z_DATA_ERROR;
1480 }
1481 return r;
1482}
1483
1484
1485local int inflate_trees_dynamic(
1486 uInt nl, /* number of literal/length codes */
1487 uInt nd, /* number of distance codes */
1488 uIntf *c, /* that many (total) code lengths */
1489 uIntf *bl, /* literal desired/actual bit depth */
1490 uIntf *bd, /* distance desired/actual bit depth */
1491 inflate_huft * FAR *tl, /* literal/length tree result */
1492 inflate_huft * FAR *td, /* distance tree result */
1493 z_stream *z /* for zfree function */
1494)
1495{
1496 int r;
1497
1498 /* build literal/length tree */
1499 if ((r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z)) != Z_OK)
1500 {
1501 if (r == Z_DATA_ERROR)
1502 z->msg = "oversubscribed literal/length tree";
1503 else if (r == Z_BUF_ERROR)
1504 {
1505 inflate_trees_free(*tl, z);
1506 z->msg = "incomplete literal/length tree";
1507 r = Z_DATA_ERROR;
1508 }
1509 return r;
1510 }
1511
1512 /* build distance tree */
1513 if ((r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z)) != Z_OK)
1514 {
1515 if (r == Z_DATA_ERROR)
1516 z->msg = "oversubscribed literal/length tree";
1517 else if (r == Z_BUF_ERROR) {
1518#ifdef PKZIP_BUG_WORKAROUND
1519 r = Z_OK;
1520 }
1521#else
1522 inflate_trees_free(*td, z);
1523 z->msg = "incomplete literal/length tree";
1524 r = Z_DATA_ERROR;
1525 }
1526 inflate_trees_free(*tl, z);
1527 return r;
1528#endif
1529 }
1530
1531 /* done */
1532 return Z_OK;
1533}
1534
1535
/* build fixed tables only once--keep them here */
local int fixed_lock = 0;               /* crude busy-wait exclusion flag */
local int fixed_built = 0;              /* nonzero once the tables exist */
#define FIXEDH 530 /* number of hufts used by fixed tables */
local uInt fixed_left = FIXEDH;         /* hufts still free in fixed_mem */
local inflate_huft fixed_mem[FIXEDH];   /* static arena carved up by falloc() */
local uInt fixed_bl;                    /* cached literal tree bit depth */
local uInt fixed_bd;                    /* cached distance tree bit depth */
local inflate_huft *fixed_tl;           /* cached literal/length tree */
local inflate_huft *fixed_td;           /* cached distance tree */
1546
1547
1548local voidpf falloc(
1549 voidpf q, /* opaque pointer (not used) */
1550 uInt n, /* number of items */
1551 uInt s /* size of item */
1552)
1553{
1554 Assert(s == sizeof(inflate_huft) && n <= fixed_left,
1555 "inflate_trees falloc overflow");
1556 if (q) s++; /* to make some compilers happy */
1557 fixed_left -= n;
1558 return (voidpf)(fixed_mem + fixed_left);
1559}
1560
1561
1562local void ffree(
1563 voidpf q,
1564 voidpf p,
1565 uInt n
1566)
1567{
1568 Assert(0, "inflate_trees ffree called!");
1569 if (q) q = p; /* to make some compilers happy */
1570}
1571
1572
/* Return the fixed literal/length and distance trees, building them on
   first use into the static fixed_mem arena via falloc(). */
local int inflate_trees_fixed(
	uIntf *bl,              /* literal desired/actual bit depth */
	uIntf *bd,              /* distance desired/actual bit depth */
	inflate_huft * FAR *tl, /* literal/length tree result */
	inflate_huft * FAR *td  /* distance tree result */
)
{
  /* build fixed tables if not built already--lock out other instances */
  /* NOTE(review): this increment/decrement "lock" is not atomic and cannot
     truly exclude concurrent builders -- confirm this path is only reached
     from a single context */
  while (++fixed_lock > 1)
    fixed_lock--;
  if (!fixed_built)
  {
    int k;              /* temporary variable */
    unsigned c[288];    /* length list for huft_build */
    z_stream z;         /* for falloc function */

    /* set up fake z_stream for memory routines */
    z.zalloc = falloc;
    z.zfree = ffree;
    z.opaque = Z_NULL;

    /* literal table */
    for (k = 0; k < 144; k++)
      c[k] = 8;
    for (; k < 256; k++)
      c[k] = 9;
    for (; k < 280; k++)
      c[k] = 7;
    for (; k < 288; k++)
      c[k] = 8;
    fixed_bl = 7;
    huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);

    /* distance table */
    for (k = 0; k < 30; k++)
      c[k] = 5;
    fixed_bd = 5;
    huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);

    /* done */
    fixed_built = 1;
  }
  fixed_lock--;
  *bl = fixed_bl;
  *bd = fixed_bd;
  *tl = fixed_tl;
  *td = fixed_td;
  return Z_OK;
}
1622
1623
1624local int inflate_trees_free(
1625 inflate_huft *t, /* table to free */
1626 z_stream *z /* for zfree function */
1627)
1628/* Free the malloc'ed tables built by huft_build(), which makes a linked
1629 list of the tables it made, with the links in a dummy first entry of
1630 each table. */
1631{
1632 register inflate_huft *p, *q;
1633
1634 /* Go through linked list, freeing from the malloced (t[-1]) address. */
1635 p = t;
1636 while (p != Z_NULL)
1637 {
1638 q = (--p)->next;
1639 ZFREE(z, p, p->word.Nalloc * sizeof(inflate_huft));
1640 p = q;
1641 }
1642 return Z_OK;
1643}
1644
1645/*+++++*/
1646/* infcodes.c -- process literals and length/distance pairs
1647 * Copyright (C) 1995 Mark Adler
1648 * For conditions of distribution and use, see copyright notice in zlib.h
1649 */
1650
1651/* simplify the use of the inflate_huft type with some defines */
1652#define base more.Base
1653#define next more.Next
1654#define exop word.what.Exop
1655#define bits word.what.Bits
1656
1657/* inflate codes private state */
struct inflate_codes_state {

  /* mode */
  enum {        /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
      START,    /* x: set up for LEN */
      LEN,      /* i: get length/literal/eob next */
      LENEXT,   /* i: getting length extra (have base) */
      DIST,     /* i: get distance next */
      DISTEXT,  /* i: getting distance extra */
      COPY,     /* o: copying bytes in window, waiting for space */
      LIT,      /* o: got literal, waiting for output space */
      WASH,     /* o: got eob, possibly still output waiting */
      END,      /* x: got eob and all data flushed */
      BADCODE}  /* x: got error */
    mode;               /* current inflate_codes mode */

  /* mode dependent information */
  uInt len;             /* length base (LENEXT) / bytes left to copy (COPY) */
  union {
    struct {
      inflate_huft *tree;       /* pointer into tree */
      uInt need;                /* bits needed */
    } code;             /* if LEN or DIST, where in tree */
    uInt lit;           /* if LIT, literal */
    struct {
      uInt get;                 /* bits to get for extra */
      uInt dist;                /* distance back to copy from */
    } copy;             /* if EXT or COPY, where and how much */
  } sub;                /* submode */

  /* mode independent information */
  Byte lbits;           /* ltree bits decoded per branch */
  Byte dbits;           /* dtree bits decoded per branch */
  inflate_huft *ltree;  /* literal/length/eob tree */
  inflate_huft *dtree;  /* distance tree */

};
1695
1696
1697local inflate_codes_statef *inflate_codes_new(
1698 uInt bl,
1699 uInt bd,
1700 inflate_huft *tl,
1701 inflate_huft *td,
1702 z_stream *z
1703)
1704{
1705 inflate_codes_statef *c;
1706
1707 if ((c = (inflate_codes_statef *)
1708 ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
1709 {
1710 c->mode = START;
1711 c->lbits = (Byte)bl;
1712 c->dbits = (Byte)bd;
1713 c->ltree = tl;
1714 c->dtree = td;
1715 Tracev((stderr, "inflate: codes new\n"));
1716 }
1717 return c;
1718}
1719
1720
/* Decode literals and length/distance pairs until end of block or until
   input or output space runs out.  r carries the provisional return code
   between calls; the shared locals b,k,p,n,q,m are bound by name into the
   LOAD/UPDATE/NEEDBITS/NEEDOUT/LEAVE macros. */
local int inflate_codes(
	inflate_blocks_statef *s,
	z_stream *z,
	int r
)
{
  uInt j;               /* temporary storage */
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  Bytef *f;             /* pointer to copy strings from */
  inflate_codes_statef *c = s->sub.decode.codes;  /* codes state */

  /* copy input/output information to locals (UPDATE macro restores) */
  LOAD

  /* process input and output based on current state */
  while (1) switch (c->mode)
  {             /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
    case START:         /* x: set up for LEN */
#ifndef SLOW
      /* enough room for the worst-case string and bit-buffer refills:
         hand off to the unchecked fast loop */
      if (m >= 258 && n >= 10)
      {
        UPDATE
        r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
        LOAD
        if (r != Z_OK)
        {
          c->mode = r == Z_STREAM_END ? WASH : BADCODE;
          break;
        }
      }
#endif /* !SLOW */
      c->sub.code.need = c->lbits;
      c->sub.code.tree = c->ltree;
      c->mode = LEN;
    case LEN:           /* i: get length/literal/eob next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e == 0)               /* literal */
      {
        c->sub.lit = t->base;
        Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                 "inflate: literal '%c'\n" :
                 "inflate: literal 0x%02x\n", t->base));
        c->mode = LIT;
        break;
      }
      if (e & 16)               /* length */
      {
        c->sub.copy.get = e & 15;
        c->len = t->base;
        c->mode = LENEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      if (e & 32)               /* end of block */
      {
        Tracevv((stderr, "inflate: end of block\n"));
        c->mode = WASH;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid literal/length code";
      r = Z_DATA_ERROR;
      LEAVE
    case LENEXT:        /* i: getting length extra (have base) */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->len += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      c->sub.code.need = c->dbits;
      c->sub.code.tree = c->dtree;
      Tracevv((stderr, "inflate: length %u\n", c->len));
      c->mode = DIST;
    case DIST:          /* i: get distance next */
      j = c->sub.code.need;
      NEEDBITS(j)
      t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
      DUMPBITS(t->bits)
      e = (uInt)(t->exop);
      if (e & 16)               /* distance */
      {
        c->sub.copy.get = e & 15;
        c->sub.copy.dist = t->base;
        c->mode = DISTEXT;
        break;
      }
      if ((e & 64) == 0)        /* next table */
      {
        c->sub.code.need = e;
        c->sub.code.tree = t->next;
        break;
      }
      c->mode = BADCODE;        /* invalid code */
      z->msg = "invalid distance code";
      r = Z_DATA_ERROR;
      LEAVE
    case DISTEXT:       /* i: getting distance extra */
      j = c->sub.copy.get;
      NEEDBITS(j)
      c->sub.copy.dist += (uInt)b & inflate_mask[j];
      DUMPBITS(j)
      Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
      c->mode = COPY;
    case COPY:          /* o: copying bytes in window, waiting for space */
#ifndef __TURBOC__ /* Turbo C bug for following expression */
      /* source wraps to the end of the window when dist reaches back
         past the current write position */
      f = (uInt)(q - s->window) < c->sub.copy.dist ?
          s->end - (c->sub.copy.dist - (q - s->window)) :
          q - c->sub.copy.dist;
#else
      f = q - c->sub.copy.dist;
      if ((uInt)(q - s->window) < c->sub.copy.dist)
        f = s->end - (c->sub.copy.dist - (q - s->window));
#endif
      while (c->len)
      {
        NEEDOUT
        OUTBYTE(*f++)
        if (f == s->end)
          f = s->window;
        c->len--;
      }
      c->mode = START;
      break;
    case LIT:           /* o: got literal, waiting for output space */
      NEEDOUT
      OUTBYTE(c->sub.lit)
      c->mode = START;
      break;
    case WASH:          /* o: got eob, possibly more output */
      FLUSH
      if (s->read != s->write)
        LEAVE
      c->mode = END;
    case END:
      r = Z_STREAM_END;
      LEAVE
    case BADCODE:       /* x: got error */
      r = Z_DATA_ERROR;
      LEAVE
    default:
      r = Z_STREAM_ERROR;
      LEAVE
  }
}
1880
1881
/* Release the codes state allocated by inflate_codes_new(). */
local void inflate_codes_free(
	inflate_codes_statef *c,
	z_stream *z
)
{
  ZFREE(z, c, sizeof(struct inflate_codes_state));
  Tracev((stderr, "inflate: codes free\n"));
}
1890
1891/*+++++*/
1892/* inflate_util.c -- data and routines common to blocks and codes
1893 * Copyright (C) 1995 Mark Adler
1894 * For conditions of distribution and use, see copyright notice in zlib.h
1895 */
1896
1897/* copy as much as possible from the sliding window to the output area */
1898local int inflate_flush(
1899 inflate_blocks_statef *s,
1900 z_stream *z,
1901 int r
1902)
1903{
1904 uInt n;
1905 Bytef *p, *q;
1906
1907 /* local copies of source and destination pointers */
1908 p = z->next_out;
1909 q = s->read;
1910
1911 /* compute number of bytes to copy as far as end of window */
1912 n = (uInt)((q <= s->write ? s->write : s->end) - q);
1913 if (n > z->avail_out) n = z->avail_out;
1914 if (n && r == Z_BUF_ERROR) r = Z_OK;
1915
1916 /* update counters */
1917 z->avail_out -= n;
1918 z->total_out += n;
1919
1920 /* update check information */
1921 if (s->checkfn != Z_NULL)
1922 s->check = (*s->checkfn)(s->check, q, n);
1923
1924 /* copy as far as end of window */
1925 zmemcpy(p, q, n);
1926 p += n;
1927 q += n;
1928
1929 /* see if more to copy at beginning of window */
1930 if (q == s->end)
1931 {
1932 /* wrap pointers */
1933 q = s->window;
1934 if (s->write == s->end)
1935 s->write = s->window;
1936
1937 /* compute bytes to copy */
1938 n = (uInt)(s->write - q);
1939 if (n > z->avail_out) n = z->avail_out;
1940 if (n && r == Z_BUF_ERROR) r = Z_OK;
1941
1942 /* update counters */
1943 z->avail_out -= n;
1944 z->total_out += n;
1945
1946 /* update check information */
1947 if (s->checkfn != Z_NULL)
1948 s->check = (*s->checkfn)(s->check, q, n);
1949
1950 /* copy */
1951 zmemcpy(p, q, n);
1952 p += n;
1953 q += n;
1954 }
1955
1956 /* update pointers */
1957 z->next_out = p;
1958 s->read = q;
1959
1960 /* done */
1961 return r;
1962}
1963
1964
1965/*+++++*/
1966/* inffast.c -- process literals and length/distance pairs fast
1967 * Copyright (C) 1995 Mark Adler
1968 * For conditions of distribution and use, see copyright notice in zlib.h
1969 */
1970
1971/* simplify the use of the inflate_huft type with some defines */
1972#define base more.Base
1973#define next more.Next
1974#define exop word.what.Exop
1975#define bits word.what.Bits
1976
1977/* macros for bit input with no checking and for returning unused bytes */
1978#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
1979#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
1980
1981/* Called with number of bytes left to write in window at least 258
1982 (the maximum string length) and number of input bytes available
1983 at least ten. The ten bytes are six bytes for the longest length/
1984 distance pair plus four bytes for overloading the bit buffer. */
1985
/* Fast decode loop (see the contract in the comment above): requires at
   least 258 bytes of window space and at least ten input bytes, so no
   per-byte bounds checks are needed inside the loop.  The locals b,k,p,
   n,c are bound by name into the GRABBITS/DUMPBITS/UNGRAB macros. */
local int inflate_fast(
	uInt bl,                /* literal tree lookup bits */
	uInt bd,                /* distance tree lookup bits */
	inflate_huft *tl,       /* literal/length tree */
	inflate_huft *td,       /* distance tree */
	inflate_blocks_statef *s,
	z_stream *z
)
{
  inflate_huft *t;      /* temporary pointer */
  uInt e;               /* extra bits or operation */
  uLong b;              /* bit buffer */
  uInt k;               /* bits in bit buffer */
  Bytef *p;             /* input data pointer */
  uInt n;               /* bytes available there */
  Bytef *q;             /* output window write pointer */
  uInt m;               /* bytes to end of window or read pointer */
  uInt ml;              /* mask for literal/length tree */
  uInt md;              /* mask for distance tree */
  uInt c;               /* bytes to copy */
  uInt d;               /* distance back to copy from */
  Bytef *r;             /* copy source pointer */

  /* load input, output, bit values */
  LOAD

  /* initialize masks */
  ml = inflate_mask[bl];
  md = inflate_mask[bd];

  /* do until not enough input or output space for fast loop */
  do {                          /* assume called with m >= 258 && n >= 10 */
    /* get literal/length code */
    GRABBITS(20)                /* max bits for literal/length code */
    if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
    {
      DUMPBITS(t->bits)
      Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                "inflate: * literal '%c'\n" :
                "inflate: * literal 0x%02x\n", t->base));
      *q++ = (Byte)t->base;
      m--;
      continue;
    }
    do {
      DUMPBITS(t->bits)
      if (e & 16)
      {
        /* get extra bits for length */
        e &= 15;
        c = t->base + ((uInt)b & inflate_mask[e]);
        DUMPBITS(e)
        Tracevv((stderr, "inflate: * length %u\n", c));

        /* decode distance base of block to copy */
        GRABBITS(15);           /* max bits for distance code */
        e = (t = td + ((uInt)b & md))->exop;
        do {
          DUMPBITS(t->bits)
          if (e & 16)
          {
            /* get extra bits to add to distance base */
            e &= 15;
            GRABBITS(e)         /* get extra bits (up to 13) */
            d = t->base + ((uInt)b & inflate_mask[e]);
            DUMPBITS(e)
            Tracevv((stderr, "inflate: * distance %u\n", d));

            /* do the copy */
            m -= c;
            if ((uInt)(q - s->window) >= d)     /* offset before dest */
            {                                   /* just copy */
              r = q - d;
              *q++ = *r++; c--;         /* minimum count is three, */
              *q++ = *r++; c--;         /* so unroll loop a little */
            }
            else                        /* else offset after destination */
            {
              e = d - (q - s->window);  /* bytes from offset to end */
              r = s->end - e;           /* pointer to offset */
              if (c > e)                /* if source crosses, */
              {
                c -= e;                 /* copy to end of window */
                do {
                  *q++ = *r++;
                } while (--e);
                r = s->window;          /* copy rest from start of window */
              }
            }
            do {                        /* copy all or what's left */
              *q++ = *r++;
            } while (--c);
            break;
          }
          else if ((e & 64) == 0)
            e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
          else
          {
            z->msg = "invalid distance code";
            UNGRAB
            UPDATE
            return Z_DATA_ERROR;
          }
        } while (1);
        break;
      }
      if ((e & 64) == 0)
      {
        if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
        {
          DUMPBITS(t->bits)
          Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
                    "inflate: * literal '%c'\n" :
                    "inflate: * literal 0x%02x\n", t->base));
          *q++ = (Byte)t->base;
          m--;
          break;
        }
      }
      else if (e & 32)
      {
        Tracevv((stderr, "inflate: * end of block\n"));
        UNGRAB
        UPDATE
        return Z_STREAM_END;
      }
      else
      {
        z->msg = "invalid literal/length code";
        UNGRAB
        UPDATE
        return Z_DATA_ERROR;
      }
    } while (1);
  } while (m >= 258 && n >= 10);

  /* not enough input or output--restore pointers and return */
  UNGRAB
  UPDATE
  return Z_OK;
}
2127
2128
2129/*+++++*/
2130/* zutil.c -- target dependent utility functions for the compression library
2131 * Copyright (C) 1995 Jean-loup Gailly.
2132 * For conditions of distribution and use, see copyright notice in zlib.h
2133 */
2134
2135/* From: zutil.c,v 1.8 1995/05/03 17:27:12 jloup Exp */
2136
char *zlib_version = ZLIB_VERSION;

/* Error message table: ordered so that z_errmsg[1 - err] yields the text
   for error code err (codes noted beside each entry). */
char *z_errmsg[] = {
"stream end",           /* Z_STREAM_END    1  */
"",                     /* Z_OK            0  */
"file error",           /* Z_ERRNO       (-1) */
"stream error",         /* Z_STREAM_ERROR (-2) */
"data error",           /* Z_DATA_ERROR  (-3) */
"insufficient memory",  /* Z_MEM_ERROR   (-4) */
"buffer error",         /* Z_BUF_ERROR   (-5) */
""};
2148
2149
2150/*+++++*/
2151/* adler32.c -- compute the Adler-32 checksum of a data stream
2152 * Copyright (C) 1995 Mark Adler
2153 * For conditions of distribution and use, see copyright notice in zlib.h
2154 */
2155
2156/* From: adler32.c,v 1.6 1995/05/03 17:27:08 jloup Exp */
2157
2158#define BASE 65521L /* largest prime smaller than 65536 */
2159#define NMAX 5552
2160/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
2161
2162#define DO1(buf) {s1 += *buf++; s2 += s1;}
2163#define DO2(buf) DO1(buf); DO1(buf);
2164#define DO4(buf) DO2(buf); DO2(buf);
2165#define DO8(buf) DO4(buf); DO4(buf);
2166#define DO16(buf) DO8(buf); DO8(buf);
2167
2168/* ========================================================================= */
2169uLong adler32(
2170 uLong adler,
2171 Bytef *buf,
2172 uInt len
2173)
2174{
2175 unsigned long s1 = adler & 0xffff;
2176 unsigned long s2 = (adler >> 16) & 0xffff;
2177 int k;
2178
2179 if (buf == Z_NULL) return 1L;
2180
2181 while (len > 0) {
2182 k = len < NMAX ? len : NMAX;
2183 len -= k;
2184 while (k >= 16) {
2185 DO16(buf);
2186 k -= 16;
2187 }
2188 if (k != 0) do {
2189 DO1(buf);
2190 } while (--k);
2191 s1 %= BASE;
2192 s2 %= BASE;
2193 }
2194 return (s2 << 16) | s1;
2195}
diff --git a/arch/ppc64/boot/zlib.h b/arch/ppc64/boot/zlib.h
deleted file mode 100644
index f0b996c6864f..000000000000
--- a/arch/ppc64/boot/zlib.h
+++ /dev/null
@@ -1,432 +0,0 @@
1/* */
2
3/*
4 * This file is derived from zlib.h and zconf.h from the zlib-0.95
5 * distribution by Jean-loup Gailly and Mark Adler, with some additions
6 * by Paul Mackerras to aid in implementing Deflate compression and
7 * decompression for PPP packets.
8 */
9
10/*
11 * ==FILEVERSION 960122==
12 *
13 * This marker is used by the Linux installation script to determine
14 * whether an up-to-date version of this file is already installed.
15 */
16
17/* zlib.h -- interface of the 'zlib' general purpose compression library
18 version 0.95, Aug 16th, 1995.
19
20 Copyright (C) 1995 Jean-loup Gailly and Mark Adler
21
22 This software is provided 'as-is', without any express or implied
23 warranty. In no event will the authors be held liable for any damages
24 arising from the use of this software.
25
26 Permission is granted to anyone to use this software for any purpose,
27 including commercial applications, and to alter it and redistribute it
28 freely, subject to the following restrictions:
29
30 1. The origin of this software must not be misrepresented; you must not
31 claim that you wrote the original software. If you use this software
32 in a product, an acknowledgment in the product documentation would be
33 appreciated but is not required.
34 2. Altered source versions must be plainly marked as such, and must not be
35 misrepresented as being the original software.
36 3. This notice may not be removed or altered from any source distribution.
37
38 Jean-loup Gailly Mark Adler
39 gzip@prep.ai.mit.edu madler@alumni.caltech.edu
40 */
41
42#ifndef _ZLIB_H
43#define _ZLIB_H
44
45/* #include "zconf.h" */ /* included directly here */
46
47/* zconf.h -- configuration of the zlib compression library
48 * Copyright (C) 1995 Jean-loup Gailly.
49 * For conditions of distribution and use, see copyright notice in zlib.h
50 */
51
52/* From: zconf.h,v 1.12 1995/05/03 17:27:12 jloup Exp */
53
54/*
55 The library does not install any signal handler. It is recommended to
56 add at least a handler for SIGSEGV when decompressing; the library checks
57 the consistency of the input data whenever possible but may go nuts
58 for some forms of corrupted input.
59 */
60
61/*
62 * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
63 * than 64k bytes at a time (needed on systems with 16-bit int).
64 * Compile with -DUNALIGNED_OK if it is OK to access shorts or ints
65 * at addresses which are not a multiple of their size.
66 * Under DOS, -DFAR=far or -DFAR=__far may be needed.
67 */
68
69#ifndef STDC
70# if defined(MSDOS) || defined(__STDC__) || defined(__cplusplus)
71# define STDC
72# endif
73#endif
74
75#ifdef __MWERKS__ /* Metrowerks CodeWarrior declares fileno() in unix.h */
76# include <unix.h>
77#endif
78
79/* Maximum value for memLevel in deflateInit2 */
80#ifndef MAX_MEM_LEVEL
81# ifdef MAXSEG_64K
82# define MAX_MEM_LEVEL 8
83# else
84# define MAX_MEM_LEVEL 9
85# endif
86#endif
87
88#ifndef FAR
89# define FAR
90#endif
91
92/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
93#ifndef MAX_WBITS
94# define MAX_WBITS 15 /* 32K LZ77 window */
95#endif
96
97/* The memory requirements for deflate are (in bytes):
98 1 << (windowBits+2) + 1 << (memLevel+9)
99 that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
100 plus a few kilobytes for small objects. For example, if you want to reduce
101 the default memory requirements from 256K to 128K, compile with
102 make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
103 Of course this will generally degrade compression (there's no free lunch).
104
105 The memory requirements for inflate are (in bytes) 1 << windowBits
106 that is, 32K for windowBits=15 (default value) plus a few kilobytes
107 for small objects.
108*/
109
110 /* Type declarations */
111
112#ifndef OF /* function prototypes */
113# ifdef STDC
114# define OF(args) args
115# else
116# define OF(args) ()
117# endif
118#endif
119
120typedef unsigned char Byte; /* 8 bits */
121typedef unsigned int uInt; /* 16 bits or more */
122typedef unsigned long uLong; /* 32 bits or more */
123
124typedef Byte FAR Bytef;
125typedef char FAR charf;
126typedef int FAR intf;
127typedef uInt FAR uIntf;
128typedef uLong FAR uLongf;
129
130#ifdef STDC
131 typedef void FAR *voidpf;
132 typedef void *voidp;
133#else
134 typedef Byte FAR *voidpf;
135 typedef Byte *voidp;
136#endif
137
138/* end of original zconf.h */
139
140#define ZLIB_VERSION "0.95P"
141
142/*
143 The 'zlib' compression library provides in-memory compression and
144 decompression functions, including integrity checks of the uncompressed
145 data. This version of the library supports only one compression method
146 (deflation) but other algorithms may be added later and will have the same
147 stream interface.
148
149 For compression the application must provide the output buffer and
150 may optionally provide the input buffer for optimization. For decompression,
151 the application must provide the input buffer and may optionally provide
152 the output buffer for optimization.
153
154 Compression can be done in a single step if the buffers are large
155 enough (for example if an input file is mmap'ed), or can be done by
156 repeated calls of the compression function. In the latter case, the
157 application must provide more input and/or consume the output
158 (providing more output space) before each call.
159*/
160
161typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
162typedef void (*free_func) OF((voidpf opaque, voidpf address, uInt nbytes));
163
164struct internal_state;
165
166typedef struct z_stream_s {
167 Bytef *next_in; /* next input byte */
168 uInt avail_in; /* number of bytes available at next_in */
169 uLong total_in; /* total nb of input bytes read so far */
170
171 Bytef *next_out; /* next output byte should be put there */
172 uInt avail_out; /* remaining free space at next_out */
173 uLong total_out; /* total nb of bytes output so far */
174
175 char *msg; /* last error message, NULL if no error */
176 struct internal_state FAR *state; /* not visible by applications */
177
178 alloc_func zalloc; /* used to allocate the internal state */
179 free_func zfree; /* used to free the internal state */
180 voidp opaque; /* private data object passed to zalloc and zfree */
181
182 Byte data_type; /* best guess about the data type: ascii or binary */
183
184} z_stream;
185
186/*
187 The application must update next_in and avail_in when avail_in has
188 dropped to zero. It must update next_out and avail_out when avail_out
189 has dropped to zero. The application must initialize zalloc, zfree and
190 opaque before calling the init function. All other fields are set by the
191 compression library and must not be updated by the application.
192
193 The opaque value provided by the application will be passed as the first
194 parameter for calls of zalloc and zfree. This can be useful for custom
195 memory management. The compression library attaches no meaning to the
196 opaque value.
197
198 zalloc must return Z_NULL if there is not enough memory for the object.
199 On 16-bit systems, the functions zalloc and zfree must be able to allocate
200 exactly 65536 bytes, but will not be required to allocate more than this
201 if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
202 pointers returned by zalloc for objects of exactly 65536 bytes *must*
203 have their offset normalized to zero. The default allocation function
204 provided by this library ensures this (see zutil.c). To reduce memory
205 requirements and avoid any allocation of 64K objects, at the expense of
206 compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
207
208 The fields total_in and total_out can be used for statistics or
209 progress reports. After compression, total_in holds the total size of
210 the uncompressed data and may be saved for use in the decompressor
211 (particularly if the decompressor wants to decompress everything in
212 a single step).
213*/
214
215 /* constants */
216
217#define Z_NO_FLUSH 0
218#define Z_PARTIAL_FLUSH 1
219#define Z_FULL_FLUSH 2
220#define Z_SYNC_FLUSH 3 /* experimental: partial_flush + byte align */
221#define Z_FINISH 4
222#define Z_PACKET_FLUSH 5
223/* See deflate() below for the usage of these constants */
224
225#define Z_OK 0
226#define Z_STREAM_END 1
227#define Z_ERRNO (-1)
228#define Z_STREAM_ERROR (-2)
229#define Z_DATA_ERROR (-3)
230#define Z_MEM_ERROR (-4)
231#define Z_BUF_ERROR (-5)
232/* error codes for the compression/decompression functions */
233
234#define Z_BEST_SPEED 1
235#define Z_BEST_COMPRESSION 9
236#define Z_DEFAULT_COMPRESSION (-1)
237/* compression levels */
238
239#define Z_FILTERED 1
240#define Z_HUFFMAN_ONLY 2
241#define Z_DEFAULT_STRATEGY 0
242
243#define Z_BINARY 0
244#define Z_ASCII 1
245#define Z_UNKNOWN 2
246/* Used to set the data_type field */
247
248#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
249
250extern char *zlib_version;
251/* The application can compare zlib_version and ZLIB_VERSION for consistency.
252 If the first character differs, the library code actually used is
253 not compatible with the zlib.h header file used by the application.
254 */
255
256 /* basic functions */
257
258extern int inflateInit OF((z_stream *strm));
259/*
260 Initializes the internal stream state for decompression. The fields
261 zalloc and zfree must be initialized before by the caller. If zalloc and
262 zfree are set to Z_NULL, inflateInit updates them to use default allocation
263 functions.
264
265 inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
266 enough memory. msg is set to null if there is no error message.
267 inflateInit does not perform any decompression: this will be done by
268 inflate().
269*/
270
271
272extern int inflate OF((z_stream *strm, int flush));
273/*
274 Performs one or both of the following actions:
275
276 - Decompress more input starting at next_in and update next_in and avail_in
277 accordingly. If not all input can be processed (because there is not
278 enough room in the output buffer), next_in is updated and processing
279 will resume at this point for the next call of inflate().
280
281 - Provide more output starting at next_out and update next_out and avail_out
282 accordingly. inflate() always provides as much output as possible
283 (until there is no more input data or no more space in the output buffer).
284
285 Before the call of inflate(), the application should ensure that at least
286 one of the actions is possible, by providing more input and/or consuming
287 more output, and updating the next_* and avail_* values accordingly.
288 The application can consume the uncompressed output when it wants, for
289 example when the output buffer is full (avail_out == 0), or after each
290 call of inflate().
291
292 If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
293 inflate flushes as much output as possible to the output buffer. The
294 flushing behavior of inflate is not specified for values of the flush
295 parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
296 current implementation actually flushes as much output as possible
297 anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
298 has been consumed, it is expecting to see the length field of a stored
299 block; if not, it returns Z_DATA_ERROR.
300
301 inflate() should normally be called until it returns Z_STREAM_END or an
302 error. However if all decompression is to be performed in a single step
303 (a single call of inflate), the parameter flush should be set to
304 Z_FINISH. In this case all pending input is processed and all pending
305 output is flushed; avail_out must be large enough to hold all the
306 uncompressed data. (The size of the uncompressed data may have been saved
307 by the compressor for this purpose.) The next operation on this stream must
308 be inflateEnd to deallocate the decompression state. The use of Z_FINISH
309 is never required, but can be used to inform inflate that a faster routine
310 may be used for the single inflate() call.
311
312 inflate() returns Z_OK if some progress has been made (more input
313 processed or more output produced), Z_STREAM_END if the end of the
314 compressed data has been reached and all uncompressed output has been
315 produced, Z_DATA_ERROR if the input data was corrupted, Z_STREAM_ERROR if
316 the stream structure was inconsistent (for example if next_in or next_out
317 was NULL), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no
318 progress is possible or if there was not enough room in the output buffer
319 when Z_FINISH is used. In the Z_DATA_ERROR case, the application may then
320 call inflateSync to look for a good compression block. */
321
322
323extern int inflateEnd OF((z_stream *strm));
324/*
325 All dynamically allocated data structures for this stream are freed.
326 This function discards any unprocessed input and does not flush any
327 pending output.
328
329 inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
330 was inconsistent. In the error case, msg may be set but then points to a
331 static string (which must not be deallocated).
332*/
333
334 /* advanced functions */
335
336extern int inflateInit2 OF((z_stream *strm,
337 int windowBits));
338/*
339 This is another version of inflateInit with more compression options. The
340 fields next_out, zalloc and zfree must be initialized before by the caller.
341
342 The windowBits parameter is the base two logarithm of the maximum window
343 size (the size of the history buffer). It should be in the range 8..15 for
344 this version of the library (the value 16 will be allowed soon). The
345 default value is 15 if inflateInit is used instead. If a compressed stream
346 with a larger window size is given as input, inflate() will return with
347 the error code Z_DATA_ERROR instead of trying to allocate a larger window.
348
349 If next_out is not null, the library will use this buffer for the history
350 buffer; the buffer must either be large enough to hold the entire output
351 data, or have at least 1<<windowBits bytes. If next_out is null, the
352 library will allocate its own buffer (and leave next_out null). next_in
353 need not be provided here but must be provided by the application for the
354 next call of inflate().
355
356 If the history buffer is provided by the application, next_out must
357 never be changed by the application since the decompressor maintains
358 history information inside this buffer from call to call; the application
359 can only reset next_out to the beginning of the history buffer when
360 avail_out is zero and all output has been consumed.
361
362 inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
363 not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
364 windowBits < 8). msg is set to null if there is no error message.
365 inflateInit2 does not perform any decompression: this will be done by
366 inflate().
367*/
368
369extern int inflateSync OF((z_stream *strm));
370/*
371 Skips invalid compressed data until the special marker (see deflate()
372 above) can be found, or until all available input is skipped. No output
373 is provided.
374
375 inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
376 if no more input was provided, Z_DATA_ERROR if no marker has been found,
377 or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
378 case, the application may save the current current value of total_in which
379 indicates where valid compressed data was found. In the error case, the
380 application may repeatedly call inflateSync, providing more input each time,
381 until success or end of the input data.
382*/
383
384extern int inflateReset OF((z_stream *strm));
385/*
386 This function is equivalent to inflateEnd followed by inflateInit,
387 but does not free and reallocate all the internal decompression state.
388 The stream will keep attributes that may have been set by inflateInit2.
389
390 inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
391 stream state was inconsistent (such as zalloc or state being NULL).
392*/
393
394extern int inflateIncomp OF((z_stream *strm));
395/*
396 This function adds the data at next_in (avail_in bytes) to the output
397 history without performing any output. There must be no pending output,
398 and the decompressor must be expecting to see the start of a block.
399 Calling this function is equivalent to decompressing a stored block
400 containing the data at next_in (except that the data is not output).
401*/
402
403 /* checksum functions */
404
405/*
406 This function is not related to compression but is exported
407 anyway because it might be useful in applications using the
408 compression library.
409*/
410
411extern uLong adler32 OF((uLong adler, Bytef *buf, uInt len));
412
413/*
414 Update a running Adler-32 checksum with the bytes buf[0..len-1] and
415 return the updated checksum. If buf is NULL, this function returns
416 the required initial value for the checksum.
417 An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
418 much faster. Usage example:
419
420 uLong adler = adler32(0L, Z_NULL, 0);
421
422 while (read_buffer(buffer, length) != EOF) {
423 adler = adler32(adler, buffer, length);
424 }
425 if (adler != original_adler) error();
426*/
427
428#ifndef _Z_UTIL_H
429 struct internal_state {int dummy;}; /* hack for buggy compilers */
430#endif
431
432#endif /* _ZLIB_H */
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 37c157c93cef..e79fd60bc122 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -1318,7 +1318,7 @@ CONFIG_MSDOS_PARTITION=y
1318# 1318#
1319CONFIG_NLS=y 1319CONFIG_NLS=y
1320CONFIG_NLS_DEFAULT="iso8859-1" 1320CONFIG_NLS_DEFAULT="iso8859-1"
1321CONFIG_NLS_CODEPAGE_437=m 1321CONFIG_NLS_CODEPAGE_437=y
1322CONFIG_NLS_CODEPAGE_737=m 1322CONFIG_NLS_CODEPAGE_737=m
1323CONFIG_NLS_CODEPAGE_775=m 1323CONFIG_NLS_CODEPAGE_775=m
1324CONFIG_NLS_CODEPAGE_850=m 1324CONFIG_NLS_CODEPAGE_850=m
@@ -1342,7 +1342,7 @@ CONFIG_NLS_ISO8859_8=m
1342CONFIG_NLS_CODEPAGE_1250=m 1342CONFIG_NLS_CODEPAGE_1250=m
1343CONFIG_NLS_CODEPAGE_1251=m 1343CONFIG_NLS_CODEPAGE_1251=m
1344CONFIG_NLS_ASCII=m 1344CONFIG_NLS_ASCII=m
1345CONFIG_NLS_ISO8859_1=m 1345CONFIG_NLS_ISO8859_1=y
1346CONFIG_NLS_ISO8859_2=m 1346CONFIG_NLS_ISO8859_2=m
1347CONFIG_NLS_ISO8859_3=m 1347CONFIG_NLS_ISO8859_3=m
1348CONFIG_NLS_ISO8859_4=m 1348CONFIG_NLS_ISO8859_4=m
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b138902..000000000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/system.h>
13#include <asm/iSeries/HvLpEvent.h>
14#include <asm/iSeries/HvCallEvent.h>
15#include <asm/iSeries/ItLpNaca.h>
16
17/* Array of LpEvent handler functions */
18LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
19unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
20
21/* Register a handler for an LpEvent type */
22
23int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
24{
25 int rc = 1;
26 if ( eventType < HvLpEvent_Type_NumTypes ) {
27 lpEventHandler[eventType] = handler;
28 rc = 0;
29 }
30 return rc;
31
32}
33
34int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
35{
36 int rc = 1;
37
38 might_sleep();
39
40 if ( eventType < HvLpEvent_Type_NumTypes ) {
41 if ( !lpEventHandlerPaths[eventType] ) {
42 lpEventHandler[eventType] = NULL;
43 rc = 0;
44
45 /* We now sleep until all other CPUs have scheduled. This ensures that
46 * the deletion is seen by all other CPUs, and that the deleted handler
47 * isn't still running on another CPU when we return. */
48 synchronize_rcu();
49 }
50 }
51 return rc;
52}
53EXPORT_SYMBOL(HvLpEvent_registerHandler);
54EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
55
56/* (lpIndex is the partition index of the target partition.
57 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
58 * indicates to use our partition index - for the other types)
59 */
60int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
61{
62 int rc = 1;
63 if ( eventType < HvLpEvent_Type_NumTypes &&
64 lpEventHandler[eventType] ) {
65 if ( lpIndex == 0 )
66 lpIndex = itLpNaca.xLpIndex;
67 HvCallEvent_openLpEventPath( lpIndex, eventType );
68 ++lpEventHandlerPaths[eventType];
69 rc = 0;
70 }
71 return rc;
72}
73
74int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
75{
76 int rc = 1;
77 if ( eventType < HvLpEvent_Type_NumTypes &&
78 lpEventHandler[eventType] &&
79 lpEventHandlerPaths[eventType] ) {
80 if ( lpIndex == 0 )
81 lpIndex = itLpNaca.xLpIndex;
82 HvCallEvent_closeLpEventPath( lpIndex, eventType );
83 --lpEventHandlerPaths[eventType];
84 rc = 0;
85 }
86 return rc;
87}
88
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index ae60eb1193c6..327c08ce4291 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -2,36 +2,34 @@
2# Makefile for the linux ppc64 kernel. 2# Makefile for the linux ppc64 kernel.
3# 3#
4 4
5ifneq ($(CONFIG_PPC_MERGE),y)
6
5EXTRA_CFLAGS += -mno-minimal-toc 7EXTRA_CFLAGS += -mno-minimal-toc
6extra-y := head.o vmlinux.lds 8extra-y := head.o vmlinux.lds
7 9
8obj-y := setup.o entry.o traps.o irq.o idle.o dma.o \ 10obj-y := misc.o prom.o
9 time.o process.o signal.o syscalls.o misc.o ptrace.o \ 11
10 align.o semaphore.o bitops.o pacaData.o \ 12endif
11 udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
12 ptrace32.o signal32.o rtc.o init_task.o \
13 lmb.o cputable.o cpu_setup_power4.o idle_power4.o \
14 iommu.o sysfs.o vdso.o pmc.o firmware.o
15obj-y += vdso32/ vdso64/
16 13
17obj-$(CONFIG_PPC_OF) += of_device.o 14obj-y += irq.o idle.o dma.o \
15 signal.o \
16 align.o bitops.o pacaData.o \
17 udbg.o ioctl32.o \
18 rtc.o \
19 cpu_setup_power4.o \
20 iommu.o sysfs.o vdso.o firmware.o
21obj-y += vdso32/ vdso64/
18 22
19pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \
20 iSeries_VpdInfo.o
21pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 23pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
22 24
23obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) 25obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
24 26
25obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \ 27obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
26 iSeries_setup.o ItLpQueue.o hvCall.o \ 28ifneq ($(CONFIG_PPC_MERGE),y)
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ 29obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
28 iSeries_iommu.o 30endif
29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o
31 31
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 32obj-$(CONFIG_PPC_PSERIES) += rtasd.o udbg_16550.o
33 pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \
34 pSeries_setup.o pSeries_iommu.o udbg_16550.o
35 33
36obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ 34obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \
37 bpa_iic.o spider-pic.o 35 bpa_iic.o spider-pic.o
@@ -41,45 +39,36 @@ obj-$(CONFIG_EEH) += eeh.o
41obj-$(CONFIG_PROC_FS) += proc_ppc64.o 39obj-$(CONFIG_PROC_FS) += proc_ppc64.o
42obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o 40obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
43obj-$(CONFIG_SMP) += smp.o 41obj-$(CONFIG_SMP) += smp.o
44obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o 42obj-$(CONFIG_MODULES) += module.o
45obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o 43ifneq ($(CONFIG_PPC_MERGE),y)
44obj-$(CONFIG_MODULES) += ppc_ksyms.o
45endif
46obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
46obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 47obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
47obj-$(CONFIG_SCANLOG) += scanlog.o 48obj-$(CONFIG_SCANLOG) += scanlog.o
48obj-$(CONFIG_VIOPATH) += viopath.o
49obj-$(CONFIG_LPARCFG) += lparcfg.o 49obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51ifneq ($(CONFIG_PPC_MERGE),y)
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 52obj-$(CONFIG_BOOTX_TEXT) += btext.o
53endif
52obj-$(CONFIG_HVCS) += hvcserver.o 54obj-$(CONFIG_HVCS) += hvcserver.o
53 55
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o 56obj-$(CONFIG_PPC_PMAC) += udbg_scc.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
57obj-$(CONFIG_XICS) += xics.o
58obj-$(CONFIG_MPIC) += mpic.o
59 57
60obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ 58obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
61 pmac_time.o pmac_nvram.o pmac_low_i2c.o \
62 udbg_scc.o
63
64obj-$(CONFIG_PPC_MAPLE) += maple_setup.o maple_pci.o maple_time.o \
65 udbg_16550.o
66
67obj-$(CONFIG_U3_DART) += u3_iommu.o
68 59
69ifdef CONFIG_SMP 60ifdef CONFIG_SMP
70obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 61obj-$(CONFIG_PPC_PMAC) += smp-tbsync.o
71obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
72obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
73obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
74obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 62obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
75endif 63endif
76 64
77obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
78obj-$(CONFIG_KPROBES) += kprobes.o 65obj-$(CONFIG_KPROBES) += kprobes.o
79 66
80CFLAGS_ioctl32.o += -Ifs/ 67CFLAGS_ioctl32.o += -Ifs/
81 68
69ifneq ($(CONFIG_PPC_MERGE),y)
82ifeq ($(CONFIG_PPC_ISERIES),y) 70ifeq ($(CONFIG_PPC_ISERIES),y)
83arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s 71arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
84AFLAGS_head.o += -Iarch/ppc64/kernel 72AFLAGS_head.o += -Iarch/powerpc/kernel
73endif
85endif 74endif
diff --git a/arch/ppc64/kernel/align.c b/arch/ppc64/kernel/align.c
index 330e7ef81427..256d5b592aa1 100644
--- a/arch/ppc64/kernel/align.c
+++ b/arch/ppc64/kernel/align.c
@@ -313,7 +313,7 @@ fix_alignment(struct pt_regs *regs)
313 /* Doing stfs, have to convert to single */ 313 /* Doing stfs, have to convert to single */
314 preempt_disable(); 314 preempt_disable();
315 enable_kernel_fp(); 315 enable_kernel_fp();
316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread.fpscr); 316 cvt_df(&current->thread.fpr[reg], (float *)&data.v[4], &current->thread);
317 disable_kernel_fp(); 317 disable_kernel_fp();
318 preempt_enable(); 318 preempt_enable();
319 } 319 }
@@ -349,7 +349,7 @@ fix_alignment(struct pt_regs *regs)
349 /* Doing lfs, have to convert to double */ 349 /* Doing lfs, have to convert to double */
350 preempt_disable(); 350 preempt_disable();
351 enable_kernel_fp(); 351 enable_kernel_fp();
352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread.fpscr); 352 cvt_fd((float *)&data.v[4], &current->thread.fpr[reg], &current->thread);
353 disable_kernel_fp(); 353 disable_kernel_fp();
354 preempt_enable(); 354 preempt_enable();
355 } 355 }
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 1ff4fa05a973..5e6046cb414e 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -46,8 +46,6 @@
46int main(void) 46int main(void)
47{ 47{
48 /* thread struct on stack */ 48 /* thread struct on stack */
49 DEFINE(THREAD_SHIFT, THREAD_SHIFT);
50 DEFINE(THREAD_SIZE, THREAD_SIZE);
51 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 49 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
52 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); 50 DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
53 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); 51 DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
@@ -77,6 +75,7 @@ int main(void)
77 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size)); 75 DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
78 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page)); 76 DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
79 DEFINE(PLATFORM, offsetof(struct systemcfg, platform)); 77 DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
78 DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
80 79
81 /* paca */ 80 /* paca */
82 DEFINE(PACA_SIZE, sizeof(struct paca_struct)); 81 DEFINE(PACA_SIZE, sizeof(struct paca_struct));
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index 5f2460090e03..da1b4b7a3269 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
39#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h> 40#include <asm/abs_addr.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h>
42 43
43#include "pci.h"
44#include "bpa_iommu.h" 44#include "bpa_iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db66f458..c2dc8f282eb8 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,9 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/nvram.h> 44#include <asm/nvram.h>
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
47#include <asm/irq.h>
46 48
47#include "pci.h"
48#include "bpa_iic.h" 49#include "bpa_iic.h"
49#include "bpa_iommu.h" 50#include "bpa_iommu.h"
50 51
@@ -54,7 +55,7 @@
54#define DBG(fmt...) 55#define DBG(fmt...)
55#endif 56#endif
56 57
57void bpa_get_cpuinfo(struct seq_file *m) 58void bpa_show_cpuinfo(struct seq_file *m)
58{ 59{
59 struct device_node *root; 60 struct device_node *root;
60 const char *model = ""; 61 const char *model = "";
@@ -128,7 +129,7 @@ struct machdep_calls __initdata bpa_md = {
128 .probe = bpa_probe, 129 .probe = bpa_probe,
129 .setup_arch = bpa_setup_arch, 130 .setup_arch = bpa_setup_arch,
130 .init_early = bpa_init_early, 131 .init_early = bpa_init_early,
131 .get_cpuinfo = bpa_get_cpuinfo, 132 .show_cpuinfo = bpa_show_cpuinfo,
132 .restart = rtas_restart, 133 .restart = rtas_restart,
133 .power_off = rtas_power_off, 134 .power_off = rtas_power_off,
134 .halt = rtas_halt, 135 .halt = rtas_halt,
diff --git a/arch/ppc64/kernel/btext.c b/arch/ppc64/kernel/btext.c
index b6fbfbe9032d..506a37885c5c 100644
--- a/arch/ppc64/kernel/btext.c
+++ b/arch/ppc64/kernel/btext.c
@@ -18,6 +18,7 @@
18#include <asm/io.h> 18#include <asm/io.h>
19#include <asm/lmb.h> 19#include <asm/lmb.h>
20#include <asm/processor.h> 20#include <asm/processor.h>
21#include <asm/udbg.h>
21 22
22#undef NO_SCROLL 23#undef NO_SCROLL
23 24
@@ -131,6 +132,47 @@ int btext_initialize(struct device_node *np)
131 return 0; 132 return 0;
132} 133}
133 134
135static void btext_putc(unsigned char c)
136{
137 btext_drawchar(c);
138}
139
140void __init init_boot_display(void)
141{
142 char *name;
143 struct device_node *np = NULL;
144 int rc = -ENODEV;
145
146 printk("trying to initialize btext ...\n");
147
148 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
149 if (name != NULL) {
150 np = of_find_node_by_path(name);
151 if (np != NULL) {
152 if (strcmp(np->type, "display") != 0) {
153 printk("boot stdout isn't a display !\n");
154 of_node_put(np);
155 np = NULL;
156 }
157 }
158 }
159 if (np)
160 rc = btext_initialize(np);
161 if (rc) {
162 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
163 if (get_property(np, "linux,opened", NULL)) {
164 printk("trying %s ...\n", np->full_name);
165 rc = btext_initialize(np);
166 printk("result: %d\n", rc);
167 }
168 if (rc == 0)
169 break;
170 }
171 }
172 if (rc == 0 && udbg_putc == NULL)
173 udbg_putc = btext_putc;
174}
175
134 176
135/* Calc the base address of a given point (x,y) */ 177/* Calc the base address of a given point (x,y) */
136static unsigned char * calc_base(int x, int y) 178static unsigned char * calc_base(int x, int y)
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c
deleted file mode 100644
index 8831a28c3c4e..000000000000
--- a/arch/ppc64/kernel/cputable.c
+++ /dev/null
@@ -1,308 +0,0 @@
1/*
2 * arch/ppc64/kernel/cputable.c
3 *
4 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
5 *
6 * Modifications for ppc64:
7 * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/string.h>
17#include <linux/sched.h>
18#include <linux/threads.h>
19#include <linux/init.h>
20#include <linux/module.h>
21
22#include <asm/oprofile_impl.h>
23#include <asm/cputable.h>
24
25struct cpu_spec* cur_cpu_spec = NULL;
26EXPORT_SYMBOL(cur_cpu_spec);
27
28/* NOTE:
29 * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
30 * the responsibility of the appropriate CPU save/restore functions to
31 * eventually copy these settings over. Those save/restore aren't yet
32 * part of the cputable though. That has to be fixed for both ppc32
33 * and ppc64
34 */
35extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec);
36extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec);
37extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
38extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec);
39
40
41/* We only set the altivec features if the kernel was compiled with altivec
42 * support
43 */
44#ifdef CONFIG_ALTIVEC
45#define CPU_FTR_ALTIVEC_COMP CPU_FTR_ALTIVEC
46#define PPC_FEATURE_HAS_ALTIVEC_COMP PPC_FEATURE_HAS_ALTIVEC
47#else
48#define CPU_FTR_ALTIVEC_COMP 0
49#define PPC_FEATURE_HAS_ALTIVEC_COMP 0
50#endif
51
52struct cpu_spec cpu_specs[] = {
53 { /* Power3 */
54 .pvr_mask = 0xffff0000,
55 .pvr_value = 0x00400000,
56 .cpu_name = "POWER3 (630)",
57 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
58 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
59 .cpu_user_features = COMMON_USER_PPC64,
60 .icache_bsize = 128,
61 .dcache_bsize = 128,
62 .num_pmcs = 8,
63 .cpu_setup = __setup_cpu_power3,
64#ifdef CONFIG_OPROFILE
65 .oprofile_cpu_type = "ppc64/power3",
66 .oprofile_model = &op_model_rs64,
67#endif
68 },
69 { /* Power3+ */
70 .pvr_mask = 0xffff0000,
71 .pvr_value = 0x00410000,
72 .cpu_name = "POWER3 (630+)",
73 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
74 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR,
75 .cpu_user_features = COMMON_USER_PPC64,
76 .icache_bsize = 128,
77 .dcache_bsize = 128,
78 .num_pmcs = 8,
79 .cpu_setup = __setup_cpu_power3,
80#ifdef CONFIG_OPROFILE
81 .oprofile_cpu_type = "ppc64/power3",
82 .oprofile_model = &op_model_rs64,
83#endif
84 },
85 { /* Northstar */
86 .pvr_mask = 0xffff0000,
87 .pvr_value = 0x00330000,
88 .cpu_name = "RS64-II (northstar)",
89 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
90 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
91 CPU_FTR_MMCRA | CPU_FTR_CTRL,
92 .cpu_user_features = COMMON_USER_PPC64,
93 .icache_bsize = 128,
94 .dcache_bsize = 128,
95 .num_pmcs = 8,
96 .cpu_setup = __setup_cpu_power3,
97#ifdef CONFIG_OPROFILE
98 .oprofile_cpu_type = "ppc64/rs64",
99 .oprofile_model = &op_model_rs64,
100#endif
101 },
102 { /* Pulsar */
103 .pvr_mask = 0xffff0000,
104 .pvr_value = 0x00340000,
105 .cpu_name = "RS64-III (pulsar)",
106 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
107 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
108 CPU_FTR_MMCRA | CPU_FTR_CTRL,
109 .cpu_user_features = COMMON_USER_PPC64,
110 .icache_bsize = 128,
111 .dcache_bsize = 128,
112 .num_pmcs = 8,
113 .cpu_setup = __setup_cpu_power3,
114#ifdef CONFIG_OPROFILE
115 .oprofile_cpu_type = "ppc64/rs64",
116 .oprofile_model = &op_model_rs64,
117#endif
118 },
119 { /* I-star */
120 .pvr_mask = 0xffff0000,
121 .pvr_value = 0x00360000,
122 .cpu_name = "RS64-III (icestar)",
123 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
124 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
125 CPU_FTR_MMCRA | CPU_FTR_CTRL,
126 .cpu_user_features = COMMON_USER_PPC64,
127 .icache_bsize = 128,
128 .dcache_bsize = 128,
129 .num_pmcs = 8,
130 .cpu_setup = __setup_cpu_power3,
131#ifdef CONFIG_OPROFILE
132 .oprofile_cpu_type = "ppc64/rs64",
133 .oprofile_model = &op_model_rs64,
134#endif
135 },
136 { /* S-star */
137 .pvr_mask = 0xffff0000,
138 .pvr_value = 0x00370000,
139 .cpu_name = "RS64-IV (sstar)",
140 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
141 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_IABR |
142 CPU_FTR_MMCRA | CPU_FTR_CTRL,
143 .cpu_user_features = COMMON_USER_PPC64,
144 .icache_bsize = 128,
145 .dcache_bsize = 128,
146 .num_pmcs = 8,
147 .cpu_setup = __setup_cpu_power3,
148#ifdef CONFIG_OPROFILE
149 .oprofile_cpu_type = "ppc64/rs64",
150 .oprofile_model = &op_model_rs64,
151#endif
152 },
153 { /* Power4 */
154 .pvr_mask = 0xffff0000,
155 .pvr_value = 0x00350000,
156 .cpu_name = "POWER4 (gp)",
157 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
158 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
159 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
160 .cpu_user_features = COMMON_USER_PPC64,
161 .icache_bsize = 128,
162 .dcache_bsize = 128,
163 .num_pmcs = 8,
164 .cpu_setup = __setup_cpu_power4,
165#ifdef CONFIG_OPROFILE
166 .oprofile_cpu_type = "ppc64/power4",
167 .oprofile_model = &op_model_rs64,
168#endif
169 },
170 { /* Power4+ */
171 .pvr_mask = 0xffff0000,
172 .pvr_value = 0x00380000,
173 .cpu_name = "POWER4+ (gq)",
174 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
175 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
176 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA,
177 .cpu_user_features = COMMON_USER_PPC64,
178 .icache_bsize = 128,
179 .dcache_bsize = 128,
180 .num_pmcs = 8,
181 .cpu_setup = __setup_cpu_power4,
182#ifdef CONFIG_OPROFILE
183 .oprofile_cpu_type = "ppc64/power4",
184 .oprofile_model = &op_model_power4,
185#endif
186 },
187 { /* PPC970 */
188 .pvr_mask = 0xffff0000,
189 .pvr_value = 0x00390000,
190 .cpu_name = "PPC970",
191 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
192 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
193 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
194 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
195 .cpu_user_features = COMMON_USER_PPC64 |
196 PPC_FEATURE_HAS_ALTIVEC_COMP,
197 .icache_bsize = 128,
198 .dcache_bsize = 128,
199 .num_pmcs = 8,
200 .cpu_setup = __setup_cpu_ppc970,
201#ifdef CONFIG_OPROFILE
202 .oprofile_cpu_type = "ppc64/970",
203 .oprofile_model = &op_model_power4,
204#endif
205 },
206 { /* PPC970FX */
207 .pvr_mask = 0xffff0000,
208 .pvr_value = 0x003c0000,
209 .cpu_name = "PPC970FX",
210 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
211 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
212 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
213 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
214 .cpu_user_features = COMMON_USER_PPC64 |
215 PPC_FEATURE_HAS_ALTIVEC_COMP,
216 .icache_bsize = 128,
217 .dcache_bsize = 128,
218 .num_pmcs = 8,
219 .cpu_setup = __setup_cpu_ppc970,
220#ifdef CONFIG_OPROFILE
221 .oprofile_cpu_type = "ppc64/970",
222 .oprofile_model = &op_model_power4,
223#endif
224 },
225 { /* PPC970MP */
226 .pvr_mask = 0xffff0000,
227 .pvr_value = 0x00440000,
228 .cpu_name = "PPC970MP",
229 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
230 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
231 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
232 CPU_FTR_CAN_NAP | CPU_FTR_MMCRA,
233 .cpu_user_features = COMMON_USER_PPC64 |
234 PPC_FEATURE_HAS_ALTIVEC_COMP,
235 .icache_bsize = 128,
236 .dcache_bsize = 128,
237 .cpu_setup = __setup_cpu_ppc970,
238#ifdef CONFIG_OPROFILE
239 .oprofile_cpu_type = "ppc64/970",
240 .oprofile_model = &op_model_power4,
241#endif
242 },
243 { /* Power5 */
244 .pvr_mask = 0xffff0000,
245 .pvr_value = 0x003a0000,
246 .cpu_name = "POWER5 (gr)",
247 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
248 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
249 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
250 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
251 CPU_FTR_MMCRA_SIHV,
252 .cpu_user_features = COMMON_USER_PPC64,
253 .icache_bsize = 128,
254 .dcache_bsize = 128,
255 .num_pmcs = 6,
256 .cpu_setup = __setup_cpu_power4,
257#ifdef CONFIG_OPROFILE
258 .oprofile_cpu_type = "ppc64/power5",
259 .oprofile_model = &op_model_power4,
260#endif
261 },
262 { /* Power5 */
263 .pvr_mask = 0xffff0000,
264 .pvr_value = 0x003b0000,
265 .cpu_name = "POWER5 (gs)",
266 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
267 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
268 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA | CPU_FTR_SMT |
269 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE |
270 CPU_FTR_MMCRA_SIHV,
271 .cpu_user_features = COMMON_USER_PPC64,
272 .icache_bsize = 128,
273 .dcache_bsize = 128,
274 .num_pmcs = 6,
275 .cpu_setup = __setup_cpu_power4,
276#ifdef CONFIG_OPROFILE
277 .oprofile_cpu_type = "ppc64/power5",
278 .oprofile_model = &op_model_power4,
279#endif
280 },
281 { /* BE DD1.x */
282 .pvr_mask = 0xffff0000,
283 .pvr_value = 0x00700000,
284 .cpu_name = "Broadband Engine",
285 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
286 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
287 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP |
288 CPU_FTR_SMT,
289 .cpu_user_features = COMMON_USER_PPC64 |
290 PPC_FEATURE_HAS_ALTIVEC_COMP,
291 .icache_bsize = 128,
292 .dcache_bsize = 128,
293 .cpu_setup = __setup_cpu_be,
294 },
295 { /* default match */
296 .pvr_mask = 0x00000000,
297 .pvr_value = 0x00000000,
298 .cpu_name = "POWER4 (compatible)",
299 .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
300 CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
301 CPU_FTR_PPCAS_ARCH_V2,
302 .cpu_user_features = COMMON_USER_PPC64,
303 .icache_bsize = 128,
304 .dcache_bsize = 128,
305 .num_pmcs = 6,
306 .cpu_setup = __setup_cpu_power4,
307 }
308};
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd731222..035d1b14a207 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/systemcfg.h> 35#include <asm/systemcfg.h>
36#include "pci.h" 36#include <asm/ppc-pci.h>
37 37
38#undef DEBUG 38#undef DEBUG
39 39
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 72c61041151a..929f9f42cf7a 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -36,6 +36,7 @@
36#include <asm/setup.h> 36#include <asm/setup.h>
37#include <asm/hvcall.h> 37#include <asm/hvcall.h>
38#include <asm/iSeries/LparMap.h> 38#include <asm/iSeries/LparMap.h>
39#include <asm/thread_info.h>
39 40
40#ifdef CONFIG_PPC_ISERIES 41#ifdef CONFIG_PPC_ISERIES
41#define DO_SOFT_DISABLE 42#define DO_SOFT_DISABLE
@@ -80,7 +81,7 @@ _stext:
80_GLOBAL(__start) 81_GLOBAL(__start)
81 /* NOP this out unconditionally */ 82 /* NOP this out unconditionally */
82BEGIN_FTR_SECTION 83BEGIN_FTR_SECTION
83 b .__start_initialization_multiplatform 84 b .__start_initialization_multiplatform
84END_FTR_SECTION(0, 1) 85END_FTR_SECTION(0, 1)
85#endif /* CONFIG_PPC_MULTIPLATFORM */ 86#endif /* CONFIG_PPC_MULTIPLATFORM */
86 87
@@ -201,22 +202,22 @@ exception_marker:
201#define EX_CCR 60 202#define EX_CCR 60
202 203
203#define EXCEPTION_PROLOG_PSERIES(area, label) \ 204#define EXCEPTION_PROLOG_PSERIES(area, label) \
204 mfspr r13,SPRG3; /* get paca address into r13 */ \ 205 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
205 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 206 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
206 std r10,area+EX_R10(r13); \ 207 std r10,area+EX_R10(r13); \
207 std r11,area+EX_R11(r13); \ 208 std r11,area+EX_R11(r13); \
208 std r12,area+EX_R12(r13); \ 209 std r12,area+EX_R12(r13); \
209 mfspr r9,SPRG1; \ 210 mfspr r9,SPRN_SPRG1; \
210 std r9,area+EX_R13(r13); \ 211 std r9,area+EX_R13(r13); \
211 mfcr r9; \ 212 mfcr r9; \
212 clrrdi r12,r13,32; /* get high part of &label */ \ 213 clrrdi r12,r13,32; /* get high part of &label */ \
213 mfmsr r10; \ 214 mfmsr r10; \
214 mfspr r11,SRR0; /* save SRR0 */ \ 215 mfspr r11,SPRN_SRR0; /* save SRR0 */ \
215 ori r12,r12,(label)@l; /* virt addr of handler */ \ 216 ori r12,r12,(label)@l; /* virt addr of handler */ \
216 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ 217 ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \
217 mtspr SRR0,r12; \ 218 mtspr SPRN_SRR0,r12; \
218 mfspr r12,SRR1; /* and SRR1 */ \ 219 mfspr r12,SPRN_SRR1; /* and SRR1 */ \
219 mtspr SRR1,r10; \ 220 mtspr SPRN_SRR1,r10; \
220 rfid; \ 221 rfid; \
221 b . /* prevent speculative execution */ 222 b . /* prevent speculative execution */
222 223
@@ -225,12 +226,12 @@ exception_marker:
225 * This code runs with relocation on. 226 * This code runs with relocation on.
226 */ 227 */
227#define EXCEPTION_PROLOG_ISERIES_1(area) \ 228#define EXCEPTION_PROLOG_ISERIES_1(area) \
228 mfspr r13,SPRG3; /* get paca address into r13 */ \ 229 mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
229 std r9,area+EX_R9(r13); /* save r9 - r12 */ \ 230 std r9,area+EX_R9(r13); /* save r9 - r12 */ \
230 std r10,area+EX_R10(r13); \ 231 std r10,area+EX_R10(r13); \
231 std r11,area+EX_R11(r13); \ 232 std r11,area+EX_R11(r13); \
232 std r12,area+EX_R12(r13); \ 233 std r12,area+EX_R12(r13); \
233 mfspr r9,SPRG1; \ 234 mfspr r9,SPRN_SPRG1; \
234 std r9,area+EX_R13(r13); \ 235 std r9,area+EX_R13(r13); \
235 mfcr r9 236 mfcr r9
236 237
@@ -283,7 +284,7 @@ exception_marker:
283 std r9,_LINK(r1); \ 284 std r9,_LINK(r1); \
284 mfctr r10; /* save CTR in stackframe */ \ 285 mfctr r10; /* save CTR in stackframe */ \
285 std r10,_CTR(r1); \ 286 std r10,_CTR(r1); \
286 mfspr r11,XER; /* save XER in stackframe */ \ 287 mfspr r11,SPRN_XER; /* save XER in stackframe */ \
287 std r11,_XER(r1); \ 288 std r11,_XER(r1); \
288 li r9,(n)+1; \ 289 li r9,(n)+1; \
289 std r9,_TRAP(r1); /* set trap number */ \ 290 std r9,_TRAP(r1); /* set trap number */ \
@@ -300,7 +301,7 @@ exception_marker:
300 .globl label##_pSeries; \ 301 .globl label##_pSeries; \
301label##_pSeries: \ 302label##_pSeries: \
302 HMT_MEDIUM; \ 303 HMT_MEDIUM; \
303 mtspr SPRG1,r13; /* save r13 */ \ 304 mtspr SPRN_SPRG1,r13; /* save r13 */ \
304 RUNLATCH_ON(r13); \ 305 RUNLATCH_ON(r13); \
305 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) 306 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
306 307
@@ -308,7 +309,7 @@ label##_pSeries: \
308 .globl label##_iSeries; \ 309 .globl label##_iSeries; \
309label##_iSeries: \ 310label##_iSeries: \
310 HMT_MEDIUM; \ 311 HMT_MEDIUM; \
311 mtspr SPRG1,r13; /* save r13 */ \ 312 mtspr SPRN_SPRG1,r13; /* save r13 */ \
312 RUNLATCH_ON(r13); \ 313 RUNLATCH_ON(r13); \
313 EXCEPTION_PROLOG_ISERIES_1(area); \ 314 EXCEPTION_PROLOG_ISERIES_1(area); \
314 EXCEPTION_PROLOG_ISERIES_2; \ 315 EXCEPTION_PROLOG_ISERIES_2; \
@@ -318,7 +319,7 @@ label##_iSeries: \
318 .globl label##_iSeries; \ 319 .globl label##_iSeries; \
319label##_iSeries: \ 320label##_iSeries: \
320 HMT_MEDIUM; \ 321 HMT_MEDIUM; \
321 mtspr SPRG1,r13; /* save r13 */ \ 322 mtspr SPRN_SPRG1,r13; /* save r13 */ \
322 RUNLATCH_ON(r13); \ 323 RUNLATCH_ON(r13); \
323 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \ 324 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
324 lbz r10,PACAPROCENABLED(r13); \ 325 lbz r10,PACAPROCENABLED(r13); \
@@ -388,7 +389,7 @@ __start_interrupts:
388 . = 0x200 389 . = 0x200
389_machine_check_pSeries: 390_machine_check_pSeries:
390 HMT_MEDIUM 391 HMT_MEDIUM
391 mtspr SPRG1,r13 /* save r13 */ 392 mtspr SPRN_SPRG1,r13 /* save r13 */
392 RUNLATCH_ON(r13) 393 RUNLATCH_ON(r13)
393 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 394 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
394 395
@@ -396,18 +397,18 @@ _machine_check_pSeries:
396 .globl data_access_pSeries 397 .globl data_access_pSeries
397data_access_pSeries: 398data_access_pSeries:
398 HMT_MEDIUM 399 HMT_MEDIUM
399 mtspr SPRG1,r13 400 mtspr SPRN_SPRG1,r13
400BEGIN_FTR_SECTION 401BEGIN_FTR_SECTION
401 mtspr SPRG2,r12 402 mtspr SPRN_SPRG2,r12
402 mfspr r13,DAR 403 mfspr r13,SPRN_DAR
403 mfspr r12,DSISR 404 mfspr r12,SPRN_DSISR
404 srdi r13,r13,60 405 srdi r13,r13,60
405 rlwimi r13,r12,16,0x20 406 rlwimi r13,r12,16,0x20
406 mfcr r12 407 mfcr r12
407 cmpwi r13,0x2c 408 cmpwi r13,0x2c
408 beq .do_stab_bolted_pSeries 409 beq .do_stab_bolted_pSeries
409 mtcrf 0x80,r12 410 mtcrf 0x80,r12
410 mfspr r12,SPRG2 411 mfspr r12,SPRN_SPRG2
411END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 412END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
412 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) 413 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
413 414
@@ -415,19 +416,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
415 .globl data_access_slb_pSeries 416 .globl data_access_slb_pSeries
416data_access_slb_pSeries: 417data_access_slb_pSeries:
417 HMT_MEDIUM 418 HMT_MEDIUM
418 mtspr SPRG1,r13 419 mtspr SPRN_SPRG1,r13
419 RUNLATCH_ON(r13) 420 RUNLATCH_ON(r13)
420 mfspr r13,SPRG3 /* get paca address into r13 */ 421 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
421 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 422 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
422 std r10,PACA_EXSLB+EX_R10(r13) 423 std r10,PACA_EXSLB+EX_R10(r13)
423 std r11,PACA_EXSLB+EX_R11(r13) 424 std r11,PACA_EXSLB+EX_R11(r13)
424 std r12,PACA_EXSLB+EX_R12(r13) 425 std r12,PACA_EXSLB+EX_R12(r13)
425 std r3,PACA_EXSLB+EX_R3(r13) 426 std r3,PACA_EXSLB+EX_R3(r13)
426 mfspr r9,SPRG1 427 mfspr r9,SPRN_SPRG1
427 std r9,PACA_EXSLB+EX_R13(r13) 428 std r9,PACA_EXSLB+EX_R13(r13)
428 mfcr r9 429 mfcr r9
429 mfspr r12,SRR1 /* and SRR1 */ 430 mfspr r12,SPRN_SRR1 /* and SRR1 */
430 mfspr r3,DAR 431 mfspr r3,SPRN_DAR
431 b .do_slb_miss /* Rel. branch works in real mode */ 432 b .do_slb_miss /* Rel. branch works in real mode */
432 433
433 STD_EXCEPTION_PSERIES(0x400, instruction_access) 434 STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -436,19 +437,19 @@ data_access_slb_pSeries:
436 .globl instruction_access_slb_pSeries 437 .globl instruction_access_slb_pSeries
437instruction_access_slb_pSeries: 438instruction_access_slb_pSeries:
438 HMT_MEDIUM 439 HMT_MEDIUM
439 mtspr SPRG1,r13 440 mtspr SPRN_SPRG1,r13
440 RUNLATCH_ON(r13) 441 RUNLATCH_ON(r13)
441 mfspr r13,SPRG3 /* get paca address into r13 */ 442 mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
442 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ 443 std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
443 std r10,PACA_EXSLB+EX_R10(r13) 444 std r10,PACA_EXSLB+EX_R10(r13)
444 std r11,PACA_EXSLB+EX_R11(r13) 445 std r11,PACA_EXSLB+EX_R11(r13)
445 std r12,PACA_EXSLB+EX_R12(r13) 446 std r12,PACA_EXSLB+EX_R12(r13)
446 std r3,PACA_EXSLB+EX_R3(r13) 447 std r3,PACA_EXSLB+EX_R3(r13)
447 mfspr r9,SPRG1 448 mfspr r9,SPRN_SPRG1
448 std r9,PACA_EXSLB+EX_R13(r13) 449 std r9,PACA_EXSLB+EX_R13(r13)
449 mfcr r9 450 mfcr r9
450 mfspr r12,SRR1 /* and SRR1 */ 451 mfspr r12,SPRN_SRR1 /* and SRR1 */
451 mfspr r3,SRR0 /* SRR0 is faulting address */ 452 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
452 b .do_slb_miss /* Rel. branch works in real mode */ 453 b .do_slb_miss /* Rel. branch works in real mode */
453 454
454 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) 455 STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
@@ -466,15 +467,15 @@ system_call_pSeries:
466 RUNLATCH_ON(r9) 467 RUNLATCH_ON(r9)
467 mr r9,r13 468 mr r9,r13
468 mfmsr r10 469 mfmsr r10
469 mfspr r13,SPRG3 470 mfspr r13,SPRN_SPRG3
470 mfspr r11,SRR0 471 mfspr r11,SPRN_SRR0
471 clrrdi r12,r13,32 472 clrrdi r12,r13,32
472 oris r12,r12,system_call_common@h 473 oris r12,r12,system_call_common@h
473 ori r12,r12,system_call_common@l 474 ori r12,r12,system_call_common@l
474 mtspr SRR0,r12 475 mtspr SPRN_SRR0,r12
475 ori r10,r10,MSR_IR|MSR_DR|MSR_RI 476 ori r10,r10,MSR_IR|MSR_DR|MSR_RI
476 mfspr r12,SRR1 477 mfspr r12,SPRN_SRR1
477 mtspr SRR1,r10 478 mtspr SPRN_SRR1,r10
478 rfid 479 rfid
479 b . /* prevent speculative execution */ 480 b . /* prevent speculative execution */
480 481
@@ -504,25 +505,25 @@ system_call_pSeries:
504 .align 7 505 .align 7
505_GLOBAL(do_stab_bolted_pSeries) 506_GLOBAL(do_stab_bolted_pSeries)
506 mtcrf 0x80,r12 507 mtcrf 0x80,r12
507 mfspr r12,SPRG2 508 mfspr r12,SPRN_SPRG2
508 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) 509 EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
509 510
510/* 511/*
511 * Vectors for the FWNMI option. Share common code. 512 * Vectors for the FWNMI option. Share common code.
512 */ 513 */
513 .globl system_reset_fwnmi 514 .globl system_reset_fwnmi
514system_reset_fwnmi: 515system_reset_fwnmi:
515 HMT_MEDIUM 516 HMT_MEDIUM
516 mtspr SPRG1,r13 /* save r13 */ 517 mtspr SPRN_SPRG1,r13 /* save r13 */
517 RUNLATCH_ON(r13) 518 RUNLATCH_ON(r13)
518 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) 519 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
519 520
520 .globl machine_check_fwnmi 521 .globl machine_check_fwnmi
521machine_check_fwnmi: 522machine_check_fwnmi:
522 HMT_MEDIUM 523 HMT_MEDIUM
523 mtspr SPRG1,r13 /* save r13 */ 524 mtspr SPRN_SPRG1,r13 /* save r13 */
524 RUNLATCH_ON(r13) 525 RUNLATCH_ON(r13)
525 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) 526 EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
526 527
527#ifdef CONFIG_PPC_ISERIES 528#ifdef CONFIG_PPC_ISERIES
528/*** ISeries-LPAR interrupt handlers ***/ 529/*** ISeries-LPAR interrupt handlers ***/
@@ -531,18 +532,18 @@ machine_check_fwnmi:
531 532
532 .globl data_access_iSeries 533 .globl data_access_iSeries
533data_access_iSeries: 534data_access_iSeries:
534 mtspr SPRG1,r13 535 mtspr SPRN_SPRG1,r13
535BEGIN_FTR_SECTION 536BEGIN_FTR_SECTION
536 mtspr SPRG2,r12 537 mtspr SPRN_SPRG2,r12
537 mfspr r13,DAR 538 mfspr r13,SPRN_DAR
538 mfspr r12,DSISR 539 mfspr r12,SPRN_DSISR
539 srdi r13,r13,60 540 srdi r13,r13,60
540 rlwimi r13,r12,16,0x20 541 rlwimi r13,r12,16,0x20
541 mfcr r12 542 mfcr r12
542 cmpwi r13,0x2c 543 cmpwi r13,0x2c
543 beq .do_stab_bolted_iSeries 544 beq .do_stab_bolted_iSeries
544 mtcrf 0x80,r12 545 mtcrf 0x80,r12
545 mfspr r12,SPRG2 546 mfspr r12,SPRN_SPRG2
546END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 547END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
547 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) 548 EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN)
548 EXCEPTION_PROLOG_ISERIES_2 549 EXCEPTION_PROLOG_ISERIES_2
@@ -550,25 +551,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
550 551
551.do_stab_bolted_iSeries: 552.do_stab_bolted_iSeries:
552 mtcrf 0x80,r12 553 mtcrf 0x80,r12
553 mfspr r12,SPRG2 554 mfspr r12,SPRN_SPRG2
554 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 555 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
555 EXCEPTION_PROLOG_ISERIES_2 556 EXCEPTION_PROLOG_ISERIES_2
556 b .do_stab_bolted 557 b .do_stab_bolted
557 558
558 .globl data_access_slb_iSeries 559 .globl data_access_slb_iSeries
559data_access_slb_iSeries: 560data_access_slb_iSeries:
560 mtspr SPRG1,r13 /* save r13 */ 561 mtspr SPRN_SPRG1,r13 /* save r13 */
561 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 562 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
562 std r3,PACA_EXSLB+EX_R3(r13) 563 std r3,PACA_EXSLB+EX_R3(r13)
563 ld r12,PACALPPACA+LPPACASRR1(r13) 564 ld r12,PACALPPACA+LPPACASRR1(r13)
564 mfspr r3,DAR 565 mfspr r3,SPRN_DAR
565 b .do_slb_miss 566 b .do_slb_miss
566 567
567 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) 568 STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
568 569
569 .globl instruction_access_slb_iSeries 570 .globl instruction_access_slb_iSeries
570instruction_access_slb_iSeries: 571instruction_access_slb_iSeries:
571 mtspr SPRG1,r13 /* save r13 */ 572 mtspr SPRN_SPRG1,r13 /* save r13 */
572 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) 573 EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
573 std r3,PACA_EXSLB+EX_R3(r13) 574 std r3,PACA_EXSLB+EX_R3(r13)
574 ld r12,PACALPPACA+LPPACASRR1(r13) 575 ld r12,PACALPPACA+LPPACASRR1(r13)
@@ -586,7 +587,7 @@ instruction_access_slb_iSeries:
586 .globl system_call_iSeries 587 .globl system_call_iSeries
587system_call_iSeries: 588system_call_iSeries:
588 mr r9,r13 589 mr r9,r13
589 mfspr r13,SPRG3 590 mfspr r13,SPRN_SPRG3
590 EXCEPTION_PROLOG_ISERIES_2 591 EXCEPTION_PROLOG_ISERIES_2
591 b system_call_common 592 b system_call_common
592 593
@@ -596,7 +597,7 @@ system_call_iSeries:
596 597
597 .globl system_reset_iSeries 598 .globl system_reset_iSeries
598system_reset_iSeries: 599system_reset_iSeries:
599 mfspr r13,SPRG3 /* Get paca address */ 600 mfspr r13,SPRN_SPRG3 /* Get paca address */
600 mfmsr r24 601 mfmsr r24
601 ori r24,r24,MSR_RI 602 ori r24,r24,MSR_RI
602 mtmsrd r24 /* RI on */ 603 mtmsrd r24 /* RI on */
@@ -639,7 +640,7 @@ iSeries_secondary_smp_loop:
639#endif /* CONFIG_SMP */ 640#endif /* CONFIG_SMP */
640 li r0,-1 /* r0=-1 indicates a Hypervisor call */ 641 li r0,-1 /* r0=-1 indicates a Hypervisor call */
641 sc /* Invoke the hypervisor via a system call */ 642 sc /* Invoke the hypervisor via a system call */
642 mfspr r13,SPRG3 /* Put r13 back ???? */ 643 mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
643 b 1b /* If SMP not configured, secondaries 644 b 1b /* If SMP not configured, secondaries
644 * loop forever */ 645 * loop forever */
645 646
@@ -656,8 +657,8 @@ hardware_interrupt_iSeries_masked:
656 mtcrf 0x80,r9 /* Restore regs */ 657 mtcrf 0x80,r9 /* Restore regs */
657 ld r11,PACALPPACA+LPPACASRR0(r13) 658 ld r11,PACALPPACA+LPPACASRR0(r13)
658 ld r12,PACALPPACA+LPPACASRR1(r13) 659 ld r12,PACALPPACA+LPPACASRR1(r13)
659 mtspr SRR0,r11 660 mtspr SPRN_SRR0,r11
660 mtspr SRR1,r12 661 mtspr SPRN_SRR1,r12
661 ld r9,PACA_EXGEN+EX_R9(r13) 662 ld r9,PACA_EXGEN+EX_R9(r13)
662 ld r10,PACA_EXGEN+EX_R10(r13) 663 ld r10,PACA_EXGEN+EX_R10(r13)
663 ld r11,PACA_EXGEN+EX_R11(r13) 664 ld r11,PACA_EXGEN+EX_R11(r13)
@@ -713,8 +714,8 @@ bad_stack:
713 std r10,GPR1(r1) 714 std r10,GPR1(r1)
714 std r11,_NIP(r1) 715 std r11,_NIP(r1)
715 std r12,_MSR(r1) 716 std r12,_MSR(r1)
716 mfspr r11,DAR 717 mfspr r11,SPRN_DAR
717 mfspr r12,DSISR 718 mfspr r12,SPRN_DSISR
718 std r11,_DAR(r1) 719 std r11,_DAR(r1)
719 std r12,_DSISR(r1) 720 std r12,_DSISR(r1)
720 mflr r10 721 mflr r10
@@ -746,6 +747,7 @@ bad_stack:
746 * any task or sent any task a signal, you should use 747 * any task or sent any task a signal, you should use
747 * ret_from_except or ret_from_except_lite instead of this. 748 * ret_from_except or ret_from_except_lite instead of this.
748 */ 749 */
750 .globl fast_exception_return
749fast_exception_return: 751fast_exception_return:
750 ld r12,_MSR(r1) 752 ld r12,_MSR(r1)
751 ld r11,_NIP(r1) 753 ld r11,_NIP(r1)
@@ -766,8 +768,8 @@ fast_exception_return:
766 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ 768 clrrdi r10,r10,2 /* clear RI (LE is 0 already) */
767 mtmsrd r10,1 769 mtmsrd r10,1
768 770
769 mtspr SRR1,r12 771 mtspr SPRN_SRR1,r12
770 mtspr SRR0,r11 772 mtspr SPRN_SRR0,r11
771 REST_4GPRS(10, r1) 773 REST_4GPRS(10, r1)
772 ld r1,GPR1(r1) 774 ld r1,GPR1(r1)
773 rfid 775 rfid
@@ -788,9 +790,9 @@ unrecov_fer:
788 .globl data_access_common 790 .globl data_access_common
789data_access_common: 791data_access_common:
790 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */ 792 RUNLATCH_ON(r10) /* It wont fit in the 0x300 handler */
791 mfspr r10,DAR 793 mfspr r10,SPRN_DAR
792 std r10,PACA_EXGEN+EX_DAR(r13) 794 std r10,PACA_EXGEN+EX_DAR(r13)
793 mfspr r10,DSISR 795 mfspr r10,SPRN_DSISR
794 stw r10,PACA_EXGEN+EX_DSISR(r13) 796 stw r10,PACA_EXGEN+EX_DSISR(r13)
795 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 797 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
796 ld r3,PACA_EXGEN+EX_DAR(r13) 798 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -821,9 +823,9 @@ hardware_interrupt_entry:
821 .align 7 823 .align 7
822 .globl alignment_common 824 .globl alignment_common
823alignment_common: 825alignment_common:
824 mfspr r10,DAR 826 mfspr r10,SPRN_DAR
825 std r10,PACA_EXGEN+EX_DAR(r13) 827 std r10,PACA_EXGEN+EX_DAR(r13)
826 mfspr r10,DSISR 828 mfspr r10,SPRN_DSISR
827 stw r10,PACA_EXGEN+EX_DSISR(r13) 829 stw r10,PACA_EXGEN+EX_DSISR(r13)
828 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) 830 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
829 ld r3,PACA_EXGEN+EX_DAR(r13) 831 ld r3,PACA_EXGEN+EX_DAR(r13)
@@ -857,62 +859,6 @@ fp_unavailable_common:
857 bl .kernel_fp_unavailable_exception 859 bl .kernel_fp_unavailable_exception
858 BUG_OPCODE 860 BUG_OPCODE
859 861
860/*
861 * load_up_fpu(unused, unused, tsk)
862 * Disable FP for the task which had the FPU previously,
863 * and save its floating-point registers in its thread_struct.
864 * Enables the FPU for use in the kernel on return.
865 * On SMP we know the fpu is free, since we give it up every
866 * switch (ie, no lazy save of the FP registers).
867 * On entry: r13 == 'current' && last_task_used_math != 'current'
868 */
869_STATIC(load_up_fpu)
870 mfmsr r5 /* grab the current MSR */
871 ori r5,r5,MSR_FP
872 mtmsrd r5 /* enable use of fpu now */
873 isync
874/*
875 * For SMP, we don't do lazy FPU switching because it just gets too
876 * horrendously complex, especially when a task switches from one CPU
877 * to another. Instead we call giveup_fpu in switch_to.
878 *
879 */
880#ifndef CONFIG_SMP
881 ld r3,last_task_used_math@got(r2)
882 ld r4,0(r3)
883 cmpdi 0,r4,0
884 beq 1f
885 /* Save FP state to last_task_used_math's THREAD struct */
886 addi r4,r4,THREAD
887 SAVE_32FPRS(0, r4)
888 mffs fr0
889 stfd fr0,THREAD_FPSCR(r4)
890 /* Disable FP for last_task_used_math */
891 ld r5,PT_REGS(r4)
892 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
893 li r6,MSR_FP|MSR_FE0|MSR_FE1
894 andc r4,r4,r6
895 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
8961:
897#endif /* CONFIG_SMP */
898 /* enable use of FP after return */
899 ld r4,PACACURRENT(r13)
900 addi r5,r4,THREAD /* Get THREAD */
901 ld r4,THREAD_FPEXC_MODE(r5)
902 ori r12,r12,MSR_FP
903 or r12,r12,r4
904 std r12,_MSR(r1)
905 lfd fr0,THREAD_FPSCR(r5)
906 mtfsf 0xff,fr0
907 REST_32FPRS(0, r5)
908#ifndef CONFIG_SMP
909 /* Update last_task_used_math to 'current' */
910 subi r4,r5,THREAD /* Back to 'current' */
911 std r4,0(r3)
912#endif /* CONFIG_SMP */
913 /* restore registers and return */
914 b fast_exception_return
915
916 .align 7 862 .align 7
917 .globl altivec_unavailable_common 863 .globl altivec_unavailable_common
918altivec_unavailable_common: 864altivec_unavailable_common:
@@ -1120,7 +1066,7 @@ _GLOBAL(do_stab_bolted)
1120 1066
1121 /* Hash to the primary group */ 1067 /* Hash to the primary group */
1122 ld r10,PACASTABVIRT(r13) 1068 ld r10,PACASTABVIRT(r13)
1123 mfspr r11,DAR 1069 mfspr r11,SPRN_DAR
1124 srdi r11,r11,28 1070 srdi r11,r11,28
1125 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1071 rldimi r10,r11,7,52 /* r10 = first ste of the group */
1126 1072
@@ -1162,7 +1108,7 @@ _GLOBAL(do_stab_bolted)
11622: std r9,8(r10) /* Store the vsid part of the ste */ 11082: std r9,8(r10) /* Store the vsid part of the ste */
1163 eieio 1109 eieio
1164 1110
1165 mfspr r11,DAR /* Get the new esid */ 1111 mfspr r11,SPRN_DAR /* Get the new esid */
1166 clrrdi r11,r11,28 /* Permits a full 32b of ESID */ 1112 clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1167 ori r11,r11,0x90 /* Turn on valid and kp */ 1113 ori r11,r11,0x90 /* Turn on valid and kp */
1168 std r11,0(r10) /* Put new entry back into the stab */ 1114 std r11,0(r10) /* Put new entry back into the stab */
@@ -1182,8 +1128,8 @@ _GLOBAL(do_stab_bolted)
1182 clrrdi r10,r10,2 1128 clrrdi r10,r10,2
1183 mtmsrd r10,1 1129 mtmsrd r10,1
1184 1130
1185 mtspr SRR0,r11 1131 mtspr SPRN_SRR0,r11
1186 mtspr SRR1,r12 1132 mtspr SPRN_SRR1,r12
1187 ld r9,PACA_EXSLB+EX_R9(r13) 1133 ld r9,PACA_EXSLB+EX_R9(r13)
1188 ld r10,PACA_EXSLB+EX_R10(r13) 1134 ld r10,PACA_EXSLB+EX_R10(r13)
1189 ld r11,PACA_EXSLB+EX_R11(r13) 1135 ld r11,PACA_EXSLB+EX_R11(r13)
@@ -1229,8 +1175,8 @@ _GLOBAL(do_slb_miss)
1229.machine pop 1175.machine pop
1230 1176
1231#ifdef CONFIG_PPC_ISERIES 1177#ifdef CONFIG_PPC_ISERIES
1232 mtspr SRR0,r11 1178 mtspr SPRN_SRR0,r11
1233 mtspr SRR1,r12 1179 mtspr SPRN_SRR1,r12
1234#endif /* CONFIG_PPC_ISERIES */ 1180#endif /* CONFIG_PPC_ISERIES */
1235 ld r9,PACA_EXSLB+EX_R9(r13) 1181 ld r9,PACA_EXSLB+EX_R9(r13)
1236 ld r10,PACA_EXSLB+EX_R10(r13) 1182 ld r10,PACA_EXSLB+EX_R10(r13)
@@ -1253,7 +1199,7 @@ unrecov_slb:
1253 * 1199 *
1254 * On iSeries, the hypervisor must fill in at least one entry before 1200 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv 1201 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a 1202 * as a page number (see xLparMap in lpardata.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >> 1203 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT). 1204 * PAGE_SHIFT).
1259 */ 1205 */
@@ -1316,7 +1262,7 @@ _GLOBAL(pSeries_secondary_smp_init)
1316 mr r3,r24 /* not found, copy phys to r3 */ 1262 mr r3,r24 /* not found, copy phys to r3 */
1317 b .kexec_wait /* next kernel might do better */ 1263 b .kexec_wait /* next kernel might do better */
1318 1264
13192: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 12652: mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1320 /* From now on, r24 is expected to be logical cpuid */ 1266 /* From now on, r24 is expected to be logical cpuid */
1321 mr r24,r5 1267 mr r24,r5
13223: HMT_LOW 12683: HMT_LOW
@@ -1364,6 +1310,7 @@ _STATIC(__start_initialization_iSeries)
1364 addi r2,r2,0x4000 1310 addi r2,r2,0x4000
1365 1311
1366 bl .iSeries_early_setup 1312 bl .iSeries_early_setup
1313 bl .early_setup
1367 1314
1368 /* relocation is on at this point */ 1315 /* relocation is on at this point */
1369 1316
@@ -1554,20 +1501,17 @@ copy_to_here:
1554 .section ".text"; 1501 .section ".text";
1555 .align 2 ; 1502 .align 2 ;
1556 1503
1557 .globl pmac_secondary_start_1 1504 .globl __secondary_start_pmac_0
1558pmac_secondary_start_1: 1505__secondary_start_pmac_0:
1559 li r24, 1 1506 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
1560 b .pmac_secondary_start 1507 li r24,0
1561 1508 b 1f
1562 .globl pmac_secondary_start_2 1509 li r24,1
1563pmac_secondary_start_2: 1510 b 1f
1564 li r24, 2 1511 li r24,2
1565 b .pmac_secondary_start 1512 b 1f
1566 1513 li r24,3
1567 .globl pmac_secondary_start_3 15141:
1568pmac_secondary_start_3:
1569 li r24, 3
1570 b .pmac_secondary_start
1571 1515
1572_GLOBAL(pmac_secondary_start) 1516_GLOBAL(pmac_secondary_start)
1573 /* turn on 64-bit mode */ 1517 /* turn on 64-bit mode */
@@ -1586,7 +1530,7 @@ _GLOBAL(pmac_secondary_start)
1586 LOADADDR(r4, paca) /* Get base vaddr of paca array */ 1530 LOADADDR(r4, paca) /* Get base vaddr of paca array */
1587 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ 1531 mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */
1588 add r13,r13,r4 /* for this processor. */ 1532 add r13,r13,r4 /* for this processor. */
1589 mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ 1533 mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */
1590 1534
1591 /* Create a temp kernel stack for use before relocation is on. */ 1535 /* Create a temp kernel stack for use before relocation is on. */
1592 ld r1,PACAEMERGSP(r13) 1536 ld r1,PACAEMERGSP(r13)
@@ -1621,7 +1565,7 @@ _GLOBAL(__secondary_start)
1621 /* Initialize the page table pointer register. */ 1565 /* Initialize the page table pointer register. */
1622 LOADADDR(r6,_SDR1) 1566 LOADADDR(r6,_SDR1)
1623 ld r6,0(r6) /* get the value of _SDR1 */ 1567 ld r6,0(r6) /* get the value of _SDR1 */
1624 mtspr SDR1,r6 /* set the htab location */ 1568 mtspr SPRN_SDR1,r6 /* set the htab location */
1625#endif 1569#endif
1626 /* Initialize the first segment table (or SLB) entry */ 1570 /* Initialize the first segment table (or SLB) entry */
1627 ld r3,PACASTABVIRT(r13) /* get addr of segment table */ 1571 ld r3,PACASTABVIRT(r13) /* get addr of segment table */
@@ -1650,7 +1594,7 @@ _GLOBAL(__secondary_start)
1650 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1594 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1651 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1595 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1652 beq 98f /* branch if result is 0 */ 1596 beq 98f /* branch if result is 0 */
1653 mfspr r3,PVR 1597 mfspr r3,SPRN_PVR
1654 srwi r3,r3,16 1598 srwi r3,r3,16
1655 cmpwi r3,0x37 /* SStar */ 1599 cmpwi r3,0x37 /* SStar */
1656 beq 97f 1600 beq 97f
@@ -1674,8 +1618,8 @@ _GLOBAL(__secondary_start)
1674#ifdef DO_SOFT_DISABLE 1618#ifdef DO_SOFT_DISABLE
1675 ori r4,r4,MSR_EE 1619 ori r4,r4,MSR_EE
1676#endif 1620#endif
1677 mtspr SRR0,r3 1621 mtspr SPRN_SRR0,r3
1678 mtspr SRR1,r4 1622 mtspr SPRN_SRR1,r4
1679 rfid 1623 rfid
1680 b . /* prevent speculative execution */ 1624 b . /* prevent speculative execution */
1681 1625
@@ -1737,7 +1681,7 @@ _STATIC(start_here_multiplatform)
1737 1681
1738#ifdef CONFIG_HMT 1682#ifdef CONFIG_HMT
1739 /* Start up the second thread on cpu 0 */ 1683 /* Start up the second thread on cpu 0 */
1740 mfspr r3,PVR 1684 mfspr r3,SPRN_PVR
1741 srwi r3,r3,16 1685 srwi r3,r3,16
1742 cmpwi r3,0x34 /* Pulsar */ 1686 cmpwi r3,0x34 /* Pulsar */
1743 beq 90f 1687 beq 90f
@@ -1797,7 +1741,7 @@ _STATIC(start_here_multiplatform)
1797 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ 1741 mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */
1798 add r13,r13,r24 /* for this processor. */ 1742 add r13,r13,r24 /* for this processor. */
1799 sub r13,r13,r26 /* convert to physical addr */ 1743 sub r13,r13,r26 /* convert to physical addr */
1800 mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ 1744 mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */
1801 1745
1802 /* Do very early kernel initializations, including initial hash table, 1746 /* Do very early kernel initializations, including initial hash table,
1803 * stab and slb setup before we turn on relocation. */ 1747 * stab and slb setup before we turn on relocation. */
@@ -1814,7 +1758,7 @@ _STATIC(start_here_multiplatform)
1814 lwz r3,PLATFORM(r3) /* r3 = platform flags */ 1758 lwz r3,PLATFORM(r3) /* r3 = platform flags */
1815 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */ 1759 andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
1816 beq 98f /* branch if result is 0 */ 1760 beq 98f /* branch if result is 0 */
1817 mfspr r3,PVR 1761 mfspr r3,SPRN_PVR
1818 srwi r3,r3,16 1762 srwi r3,r3,16
1819 cmpwi r3,0x37 /* SStar */ 1763 cmpwi r3,0x37 /* SStar */
1820 beq 97f 1764 beq 97f
@@ -1838,12 +1782,12 @@ _STATIC(start_here_multiplatform)
1838 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ 1782 LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
1839 sub r6,r6,r26 1783 sub r6,r6,r26
1840 ld r6,0(r6) /* get the value of _SDR1 */ 1784 ld r6,0(r6) /* get the value of _SDR1 */
1841 mtspr SDR1,r6 /* set the htab location */ 1785 mtspr SPRN_SDR1,r6 /* set the htab location */
184298: 178698:
1843 LOADADDR(r3,.start_here_common) 1787 LOADADDR(r3,.start_here_common)
1844 SET_REG_TO_CONST(r4, MSR_KERNEL) 1788 SET_REG_TO_CONST(r4, MSR_KERNEL)
1845 mtspr SRR0,r3 1789 mtspr SPRN_SRR0,r3
1846 mtspr SRR1,r4 1790 mtspr SPRN_SRR1,r4
1847 rfid 1791 rfid
1848 b . /* prevent speculative execution */ 1792 b . /* prevent speculative execution */
1849#endif /* CONFIG_PPC_MULTIPLATFORM */ 1793#endif /* CONFIG_PPC_MULTIPLATFORM */
@@ -1874,7 +1818,7 @@ _STATIC(start_here_common)
1874 LOADADDR(r24, paca) /* Get base vaddr of paca array */ 1818 LOADADDR(r24, paca) /* Get base vaddr of paca array */
1875 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ 1819 mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */
1876 add r13,r13,r24 /* for this processor. */ 1820 add r13,r13,r24 /* for this processor. */
1877 mtspr SPRG3,r13 1821 mtspr SPRN_SPRG3,r13
1878 1822
1879 /* ptr to current */ 1823 /* ptr to current */
1880 LOADADDR(r4,init_task) 1824 LOADADDR(r4,init_task)
@@ -1901,7 +1845,7 @@ _STATIC(start_here_common)
1901_GLOBAL(hmt_init) 1845_GLOBAL(hmt_init)
1902#ifdef CONFIG_HMT 1846#ifdef CONFIG_HMT
1903 LOADADDR(r5, hmt_thread_data) 1847 LOADADDR(r5, hmt_thread_data)
1904 mfspr r7,PVR 1848 mfspr r7,SPRN_PVR
1905 srwi r7,r7,16 1849 srwi r7,r7,16
1906 cmpwi r7,0x34 /* Pulsar */ 1850 cmpwi r7,0x34 /* Pulsar */
1907 beq 90f 1851 beq 90f
@@ -1910,10 +1854,10 @@ _GLOBAL(hmt_init)
1910 cmpwi r7,0x37 /* SStar */ 1854 cmpwi r7,0x37 /* SStar */
1911 beq 91f 1855 beq 91f
1912 b 101f 1856 b 101f
191390: mfspr r6,PIR 185790: mfspr r6,SPRN_PIR
1914 andi. r6,r6,0x1f 1858 andi. r6,r6,0x1f
1915 b 92f 1859 b 92f
191691: mfspr r6,PIR 186091: mfspr r6,SPRN_PIR
1917 andi. r6,r6,0x3ff 1861 andi. r6,r6,0x3ff
191892: sldi r4,r24,3 186292: sldi r4,r24,3
1919 stwx r6,r5,r4 1863 stwx r6,r5,r4
@@ -1924,8 +1868,8 @@ __hmt_secondary_hold:
1924 LOADADDR(r5, hmt_thread_data) 1868 LOADADDR(r5, hmt_thread_data)
1925 clrldi r5,r5,4 1869 clrldi r5,r5,4
1926 li r7,0 1870 li r7,0
1927 mfspr r6,PIR 1871 mfspr r6,SPRN_PIR
1928 mfspr r8,PVR 1872 mfspr r8,SPRN_PVR
1929 srwi r8,r8,16 1873 srwi r8,r8,16
1930 cmpwi r8,0x34 1874 cmpwi r8,0x34
1931 bne 93f 1875 bne 93f
@@ -1951,39 +1895,41 @@ __hmt_secondary_hold:
1951_GLOBAL(hmt_start_secondary) 1895_GLOBAL(hmt_start_secondary)
1952 LOADADDR(r4,__hmt_secondary_hold) 1896 LOADADDR(r4,__hmt_secondary_hold)
1953 clrldi r4,r4,4 1897 clrldi r4,r4,4
1954 mtspr NIADORM, r4 1898 mtspr SPRN_NIADORM, r4
1955 mfspr r4, MSRDORM 1899 mfspr r4, SPRN_MSRDORM
1956 li r5, -65 1900 li r5, -65
1957 and r4, r4, r5 1901 and r4, r4, r5
1958 mtspr MSRDORM, r4 1902 mtspr SPRN_MSRDORM, r4
1959 lis r4,0xffef 1903 lis r4,0xffef
1960 ori r4,r4,0x7403 1904 ori r4,r4,0x7403
1961 mtspr TSC, r4 1905 mtspr SPRN_TSC, r4
1962 li r4,0x1f4 1906 li r4,0x1f4
1963 mtspr TST, r4 1907 mtspr SPRN_TST, r4
1964 mfspr r4, HID0 1908 mfspr r4, SPRN_HID0
1965 ori r4, r4, 0x1 1909 ori r4, r4, 0x1
1966 mtspr HID0, r4 1910 mtspr SPRN_HID0, r4
1967 mfspr r4, SPRN_CTRLF 1911 mfspr r4, SPRN_CTRLF
1968 oris r4, r4, 0x40 1912 oris r4, r4, 0x40
1969 mtspr SPRN_CTRLT, r4 1913 mtspr SPRN_CTRLT, r4
1970 blr 1914 blr
1971#endif 1915#endif
1972 1916
1973#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)) 1917#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
1974_GLOBAL(smp_release_cpus) 1918_GLOBAL(smp_release_cpus)
1975 /* All secondary cpus are spinning on a common 1919 /* All secondary cpus are spinning on a common
1976 * spinloop, release them all now so they can start 1920 * spinloop, release them all now so they can start
1977 * to spin on their individual paca spinloops. 1921 * to spin on their individual paca spinloops.
1978 * For non SMP kernels, the secondary cpus never 1922 * For non SMP kernels, the secondary cpus never
1979 * get out of the common spinloop. 1923 * get out of the common spinloop.
1924 * XXX This does nothing useful on iSeries, secondaries are
1925 * already waiting on their paca.
1980 */ 1926 */
1981 li r3,1 1927 li r3,1
1982 LOADADDR(r5,__secondary_hold_spinloop) 1928 LOADADDR(r5,__secondary_hold_spinloop)
1983 std r3,0(r5) 1929 std r3,0(r5)
1984 sync 1930 sync
1985 blr 1931 blr
1986#endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ 1932#endif /* CONFIG_SMP */
1987 1933
1988 1934
1989/* 1935/*
@@ -1992,7 +1938,7 @@ _GLOBAL(smp_release_cpus)
1992 */ 1938 */
1993 .section ".bss" 1939 .section ".bss"
1994 1940
1995 .align 12 1941 .align PAGE_SHIFT
1996 1942
1997 .globl empty_zero_page 1943 .globl empty_zero_page
1998empty_zero_page: 1944empty_zero_page:
diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c
index bde8f42da854..4d584172055a 100644
--- a/arch/ppc64/kernel/hvcserver.c
+++ b/arch/ppc64/kernel/hvcserver.c
@@ -22,6 +22,8 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/slab.h>
26
25#include <asm/hvcall.h> 27#include <asm/hvcall.h>
26#include <asm/hvcserver.h> 28#include <asm/hvcserver.h>
27#include <asm/io.h> 29#include <asm/io.h>
diff --git a/arch/ppc64/kernel/i8259.c b/arch/ppc64/kernel/i8259.c
deleted file mode 100644
index 74dcfd68fc75..000000000000
--- a/arch/ppc64/kernel/i8259.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * c 2001 PPC64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/signal.h>
13#include <linux/cache.h>
14#include <linux/irq.h>
15#include <linux/interrupt.h>
16#include <asm/io.h>
17#include <asm/ppcdebug.h>
18#include "i8259.h"
19
20unsigned char cached_8259[2] = { 0xff, 0xff };
21#define cached_A1 (cached_8259[0])
22#define cached_21 (cached_8259[1])
23
24static __cacheline_aligned_in_smp DEFINE_SPINLOCK(i8259_lock);
25
26static int i8259_pic_irq_offset;
27static int i8259_present;
28
29int i8259_irq(int cpu)
30{
31 int irq;
32
33 spin_lock/*_irqsave*/(&i8259_lock/*, flags*/);
34 /*
35 * Perform an interrupt acknowledge cycle on controller 1
36 */
37 outb(0x0C, 0x20);
38 irq = inb(0x20) & 7;
39 if (irq == 2)
40 {
41 /*
42 * Interrupt is cascaded so perform interrupt
43 * acknowledge on controller 2
44 */
45 outb(0x0C, 0xA0);
46 irq = (inb(0xA0) & 7) + 8;
47 }
48 else if (irq==7)
49 {
50 /*
51 * This may be a spurious interrupt
52 *
53 * Read the interrupt status register. If the most
54 * significant bit is not set then there is no valid
55 * interrupt
56 */
57 outb(0x0b, 0x20);
58 if(~inb(0x20)&0x80) {
59 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
60 return -1;
61 }
62 }
63 spin_unlock/*_irqrestore*/(&i8259_lock/*, flags*/);
64 return irq;
65}
66
67static void i8259_mask_and_ack_irq(unsigned int irq_nr)
68{
69 unsigned long flags;
70
71 spin_lock_irqsave(&i8259_lock, flags);
72 if ( irq_nr >= i8259_pic_irq_offset )
73 irq_nr -= i8259_pic_irq_offset;
74
75 if (irq_nr > 7) {
76 cached_A1 |= 1 << (irq_nr-8);
77 inb(0xA1); /* DUMMY */
78 outb(cached_A1,0xA1);
79 outb(0x20,0xA0); /* Non-specific EOI */
80 outb(0x20,0x20); /* Non-specific EOI to cascade */
81 } else {
82 cached_21 |= 1 << irq_nr;
83 inb(0x21); /* DUMMY */
84 outb(cached_21,0x21);
85 outb(0x20,0x20); /* Non-specific EOI */
86 }
87 spin_unlock_irqrestore(&i8259_lock, flags);
88}
89
90static void i8259_set_irq_mask(int irq_nr)
91{
92 outb(cached_A1,0xA1);
93 outb(cached_21,0x21);
94}
95
96static void i8259_mask_irq(unsigned int irq_nr)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&i8259_lock, flags);
101 if ( irq_nr >= i8259_pic_irq_offset )
102 irq_nr -= i8259_pic_irq_offset;
103 if ( irq_nr < 8 )
104 cached_21 |= 1 << irq_nr;
105 else
106 cached_A1 |= 1 << (irq_nr-8);
107 i8259_set_irq_mask(irq_nr);
108 spin_unlock_irqrestore(&i8259_lock, flags);
109}
110
111static void i8259_unmask_irq(unsigned int irq_nr)
112{
113 unsigned long flags;
114
115 spin_lock_irqsave(&i8259_lock, flags);
116 if ( irq_nr >= i8259_pic_irq_offset )
117 irq_nr -= i8259_pic_irq_offset;
118 if ( irq_nr < 8 )
119 cached_21 &= ~(1 << irq_nr);
120 else
121 cached_A1 &= ~(1 << (irq_nr-8));
122 i8259_set_irq_mask(irq_nr);
123 spin_unlock_irqrestore(&i8259_lock, flags);
124}
125
126static void i8259_end_irq(unsigned int irq)
127{
128 if (!(get_irq_desc(irq)->status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
129 get_irq_desc(irq)->action)
130 i8259_unmask_irq(irq);
131}
132
133struct hw_interrupt_type i8259_pic = {
134 .typename = " i8259 ",
135 .enable = i8259_unmask_irq,
136 .disable = i8259_mask_irq,
137 .ack = i8259_mask_and_ack_irq,
138 .end = i8259_end_irq,
139};
140
141void __init i8259_init(int offset)
142{
143 unsigned long flags;
144
145 spin_lock_irqsave(&i8259_lock, flags);
146 i8259_pic_irq_offset = offset;
147 i8259_present = 1;
148 /* init master interrupt controller */
149 outb(0x11, 0x20); /* Start init sequence */
150 outb(0x00, 0x21); /* Vector base */
151 outb(0x04, 0x21); /* edge tiggered, Cascade (slave) on IRQ2 */
152 outb(0x01, 0x21); /* Select 8086 mode */
153 outb(0xFF, 0x21); /* Mask all */
154 /* init slave interrupt controller */
155 outb(0x11, 0xA0); /* Start init sequence */
156 outb(0x08, 0xA1); /* Vector base */
157 outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
158 outb(0x01, 0xA1); /* Select 8086 mode */
159 outb(0xFF, 0xA1); /* Mask all */
160 outb(cached_A1, 0xA1);
161 outb(cached_21, 0x21);
162 spin_unlock_irqrestore(&i8259_lock, flags);
163
164}
165
166static int i8259_request_cascade(void)
167{
168 if (!i8259_present)
169 return -ENODEV;
170
171 request_irq( i8259_pic_irq_offset + 2, no_action, SA_INTERRUPT,
172 "82c59 secondary cascade", NULL );
173
174 return 0;
175}
176
177arch_initcall(i8259_request_cascade);
diff --git a/arch/ppc64/kernel/i8259.h b/arch/ppc64/kernel/i8259.h
deleted file mode 100644
index f74764ba0bfa..000000000000
--- a/arch/ppc64/kernel/i8259.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef _PPC_KERNEL_i8259_H
10#define _PPC_KERNEL_i8259_H
11
12extern struct hw_interrupt_type i8259_pic;
13
14extern void i8259_init(int offset);
15extern int i8259_irq(int);
16
17#endif /* _PPC_KERNEL_i8259_H */
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 954395d42636..8abd2ad92832 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -31,7 +31,7 @@
31 31
32extern void power4_idle(void); 32extern void power4_idle(void);
33 33
34int default_idle(void) 34void default_idle(void)
35{ 35{
36 long oldval; 36 long oldval;
37 unsigned int cpu = smp_processor_id(); 37 unsigned int cpu = smp_processor_id();
@@ -64,11 +64,9 @@ int default_idle(void)
64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING) 64 if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
65 cpu_die(); 65 cpu_die();
66 } 66 }
67
68 return 0;
69} 67}
70 68
71int native_idle(void) 69void native_idle(void)
72{ 70{
73 while (1) { 71 while (1) {
74 ppc64_runlatch_off(); 72 ppc64_runlatch_off();
@@ -85,8 +83,6 @@ int native_idle(void)
85 system_state == SYSTEM_RUNNING) 83 system_state == SYSTEM_RUNNING)
86 cpu_die(); 84 cpu_die();
87 } 85 }
88
89 return 0;
90} 86}
91 87
92void cpu_idle(void) 88void cpu_idle(void)
diff --git a/arch/ppc64/kernel/ioctl32.c b/arch/ppc64/kernel/ioctl32.c
index a8005db23ec5..ba4a899045c2 100644
--- a/arch/ppc64/kernel/ioctl32.c
+++ b/arch/ppc64/kernel/ioctl32.c
@@ -39,9 +39,7 @@ IOCTL_TABLE_START
39#include <linux/compat_ioctl.h> 39#include <linux/compat_ioctl.h>
40#define DECLARES 40#define DECLARES
41#include "compat_ioctl.c" 41#include "compat_ioctl.c"
42COMPATIBLE_IOCTL(TIOCSTART) 42
43COMPATIBLE_IOCTL(TIOCSTOP)
44COMPATIBLE_IOCTL(TIOCSLTC)
45/* Little p (/dev/rtc, /dev/envctrl, etc.) */ 43/* Little p (/dev/rtc, /dev/envctrl, etc.) */
46COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */ 44COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
47COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */ 45COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 9c6facc24f70..ed876a5178ae 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -395,7 +395,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
395 if (post_kprobe_handler(args->regs)) 395 if (post_kprobe_handler(args->regs))
396 ret = NOTIFY_STOP; 396 ret = NOTIFY_STOP;
397 break; 397 break;
398 case DIE_GPF:
399 case DIE_PAGE_FAULT: 398 case DIE_PAGE_FAULT:
400 if (kprobe_running() && 399 if (kprobe_running() &&
401 kprobe_fault_handler(args->regs, args->trapnr)) 400 kprobe_fault_handler(args->regs, args->trapnr))
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index e7241ad80a08..077507ffbab8 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -28,6 +28,7 @@
28#include <asm/ppc_asm.h> 28#include <asm/ppc_asm.h>
29#include <asm/asm-offsets.h> 29#include <asm/asm-offsets.h>
30#include <asm/cputable.h> 30#include <asm/cputable.h>
31#include <asm/thread_info.h>
31 32
32 .text 33 .text
33 34
@@ -64,44 +65,6 @@ _GLOBAL(get_srr1)
64_GLOBAL(get_sp) 65_GLOBAL(get_sp)
65 mr r3,r1 66 mr r3,r1
66 blr 67 blr
67
68#ifdef CONFIG_PPC_ISERIES
69/* unsigned long local_save_flags(void) */
70_GLOBAL(local_get_flags)
71 lbz r3,PACAPROCENABLED(r13)
72 blr
73
74/* unsigned long local_irq_disable(void) */
75_GLOBAL(local_irq_disable)
76 lbz r3,PACAPROCENABLED(r13)
77 li r4,0
78 stb r4,PACAPROCENABLED(r13)
79 blr /* Done */
80
81/* void local_irq_restore(unsigned long flags) */
82_GLOBAL(local_irq_restore)
83 lbz r5,PACAPROCENABLED(r13)
84 /* Check if things are setup the way we want _already_. */
85 cmpw 0,r3,r5
86 beqlr
87 /* are we enabling interrupts? */
88 cmpdi 0,r3,0
89 stb r3,PACAPROCENABLED(r13)
90 beqlr
91 /* Check pending interrupts */
92 /* A decrementer, IPI or PMC interrupt may have occurred
93 * while we were in the hypervisor (which enables) */
94 ld r4,PACALPPACA+LPPACAANYINT(r13)
95 cmpdi r4,0
96 beqlr
97
98 /*
99 * Handle pending interrupts in interrupt context
100 */
101 li r0,0x5555
102 sc
103 blr
104#endif /* CONFIG_PPC_ISERIES */
105 68
106#ifdef CONFIG_IRQSTACKS 69#ifdef CONFIG_IRQSTACKS
107_GLOBAL(call_do_softirq) 70_GLOBAL(call_do_softirq)
@@ -329,7 +292,7 @@ _GLOBAL(__flush_dcache_icache)
329 292
330/* Flush the dcache */ 293/* Flush the dcache */
331 ld r7,PPC64_CACHES@toc(r2) 294 ld r7,PPC64_CACHES@toc(r2)
332 clrrdi r3,r3,12 /* Page align */ 295 clrrdi r3,r3,PAGE_SHIFT /* Page align */
333 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */ 296 lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
334 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */ 297 lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
335 mr r6,r3 298 mr r6,r3
@@ -488,25 +451,6 @@ _GLOBAL(_outsl_ns)
488 sync 451 sync
489 blr 452 blr
490 453
491
492_GLOBAL(cvt_fd)
493 lfd 0,0(r5) /* load up fpscr value */
494 mtfsf 0xff,0
495 lfs 0,0(r3)
496 stfd 0,0(r4)
497 mffs 0 /* save new fpscr value */
498 stfd 0,0(r5)
499 blr
500
501_GLOBAL(cvt_df)
502 lfd 0,0(r5) /* load up fpscr value */
503 mtfsf 0xff,0
504 lfd 0,0(r3)
505 stfs 0,0(r4)
506 mffs 0 /* save new fpscr value */
507 stfd 0,0(r5)
508 blr
509
510/* 454/*
511 * identify_cpu and calls setup_cpu 455 * identify_cpu and calls setup_cpu
512 * In: r3 = base of the cpu_specs array 456 * In: r3 = base of the cpu_specs array
@@ -692,38 +636,6 @@ _GLOBAL(disable_kernel_fp)
692 isync 636 isync
693 blr 637 blr
694 638
695/*
696 * giveup_fpu(tsk)
697 * Disable FP for the task given as the argument,
698 * and save the floating-point registers in its thread_struct.
699 * Enables the FPU for use in the kernel on return.
700 */
701_GLOBAL(giveup_fpu)
702 mfmsr r5
703 ori r5,r5,MSR_FP
704 mtmsrd r5 /* enable use of fpu now */
705 isync
706 cmpdi 0,r3,0
707 beqlr- /* if no previous owner, done */
708 addi r3,r3,THREAD /* want THREAD of task */
709 ld r5,PT_REGS(r3)
710 cmpdi 0,r5,0
711 SAVE_32FPRS(0, r3)
712 mffs fr0
713 stfd fr0,THREAD_FPSCR(r3)
714 beq 1f
715 ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
716 li r3,MSR_FP|MSR_FE0|MSR_FE1
717 andc r4,r4,r3 /* disable FP for previous task */
718 std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
7191:
720#ifndef CONFIG_SMP
721 li r5,0
722 ld r4,last_task_used_math@got(r2)
723 std r5,0(r4)
724#endif /* CONFIG_SMP */
725 blr
726
727#ifdef CONFIG_ALTIVEC 639#ifdef CONFIG_ALTIVEC
728 640
729#if 0 /* this has no callers for now */ 641#if 0 /* this has no callers for now */
@@ -778,6 +690,13 @@ _GLOBAL(giveup_altivec)
778_GLOBAL(__setup_cpu_power3) 690_GLOBAL(__setup_cpu_power3)
779 blr 691 blr
780 692
693_GLOBAL(execve)
694 li r0,__NR_execve
695 sc
696 bnslr
697 neg r3,r3
698 blr
699
781/* kexec_wait(phys_cpu) 700/* kexec_wait(phys_cpu)
782 * 701 *
783 * wait for the flag to change, indicating this kernel is going away but 702 * wait for the flag to change, indicating this kernel is going away but
@@ -948,566 +867,3 @@ _GLOBAL(kexec_sequence)
948 li r5,0 867 li r5,0
949 blr /* image->start(physid, image->start, 0); */ 868 blr /* image->start(physid, image->start, 0); */
950#endif /* CONFIG_KEXEC */ 869#endif /* CONFIG_KEXEC */
951
952/* Why isn't this a) automatic, b) written in 'C'? */
953 .balign 8
954_GLOBAL(sys_call_table32)
955 .llong .sys_restart_syscall /* 0 */
956 .llong .sys_exit
957 .llong .ppc_fork
958 .llong .sys_read
959 .llong .sys_write
960 .llong .compat_sys_open /* 5 */
961 .llong .sys_close
962 .llong .sys32_waitpid
963 .llong .sys32_creat
964 .llong .sys_link
965 .llong .sys_unlink /* 10 */
966 .llong .sys32_execve
967 .llong .sys_chdir
968 .llong .compat_sys_time
969 .llong .sys_mknod
970 .llong .sys_chmod /* 15 */
971 .llong .sys_lchown
972 .llong .sys_ni_syscall /* old break syscall */
973 .llong .sys_ni_syscall /* old stat syscall */
974 .llong .ppc32_lseek
975 .llong .sys_getpid /* 20 */
976 .llong .compat_sys_mount
977 .llong .sys_oldumount
978 .llong .sys_setuid
979 .llong .sys_getuid
980 .llong .compat_sys_stime /* 25 */
981 .llong .sys32_ptrace
982 .llong .sys_alarm
983 .llong .sys_ni_syscall /* old fstat syscall */
984 .llong .sys32_pause
985 .llong .compat_sys_utime /* 30 */
986 .llong .sys_ni_syscall /* old stty syscall */
987 .llong .sys_ni_syscall /* old gtty syscall */
988 .llong .sys32_access
989 .llong .sys32_nice
990 .llong .sys_ni_syscall /* 35 - old ftime syscall */
991 .llong .sys_sync
992 .llong .sys32_kill
993 .llong .sys_rename
994 .llong .sys32_mkdir
995 .llong .sys_rmdir /* 40 */
996 .llong .sys_dup
997 .llong .sys_pipe
998 .llong .compat_sys_times
999 .llong .sys_ni_syscall /* old prof syscall */
1000 .llong .sys_brk /* 45 */
1001 .llong .sys_setgid
1002 .llong .sys_getgid
1003 .llong .sys_signal
1004 .llong .sys_geteuid
1005 .llong .sys_getegid /* 50 */
1006 .llong .sys_acct
1007 .llong .sys_umount
1008 .llong .sys_ni_syscall /* old lock syscall */
1009 .llong .compat_sys_ioctl
1010 .llong .compat_sys_fcntl /* 55 */
1011 .llong .sys_ni_syscall /* old mpx syscall */
1012 .llong .sys32_setpgid
1013 .llong .sys_ni_syscall /* old ulimit syscall */
1014 .llong .sys32_olduname
1015 .llong .sys32_umask /* 60 */
1016 .llong .sys_chroot
1017 .llong .sys_ustat
1018 .llong .sys_dup2
1019 .llong .sys_getppid
1020 .llong .sys_getpgrp /* 65 */
1021 .llong .sys_setsid
1022 .llong .sys32_sigaction
1023 .llong .sys_sgetmask
1024 .llong .sys32_ssetmask
1025 .llong .sys_setreuid /* 70 */
1026 .llong .sys_setregid
1027 .llong .ppc32_sigsuspend
1028 .llong .compat_sys_sigpending
1029 .llong .sys32_sethostname
1030 .llong .compat_sys_setrlimit /* 75 */
1031 .llong .compat_sys_old_getrlimit
1032 .llong .compat_sys_getrusage
1033 .llong .sys32_gettimeofday
1034 .llong .sys32_settimeofday
1035 .llong .sys32_getgroups /* 80 */
1036 .llong .sys32_setgroups
1037 .llong .sys_ni_syscall /* old select syscall */
1038 .llong .sys_symlink
1039 .llong .sys_ni_syscall /* old lstat syscall */
1040 .llong .sys32_readlink /* 85 */
1041 .llong .sys_uselib
1042 .llong .sys_swapon
1043 .llong .sys_reboot
1044 .llong .old32_readdir
1045 .llong .sys_mmap /* 90 */
1046 .llong .sys_munmap
1047 .llong .sys_truncate
1048 .llong .sys_ftruncate
1049 .llong .sys_fchmod
1050 .llong .sys_fchown /* 95 */
1051 .llong .sys32_getpriority
1052 .llong .sys32_setpriority
1053 .llong .sys_ni_syscall /* old profil syscall */
1054 .llong .compat_sys_statfs
1055 .llong .compat_sys_fstatfs /* 100 */
1056 .llong .sys_ni_syscall /* old ioperm syscall */
1057 .llong .compat_sys_socketcall
1058 .llong .sys32_syslog
1059 .llong .compat_sys_setitimer
1060 .llong .compat_sys_getitimer /* 105 */
1061 .llong .compat_sys_newstat
1062 .llong .compat_sys_newlstat
1063 .llong .compat_sys_newfstat
1064 .llong .sys32_uname
1065 .llong .sys_ni_syscall /* 110 old iopl syscall */
1066 .llong .sys_vhangup
1067 .llong .sys_ni_syscall /* old idle syscall */
1068 .llong .sys_ni_syscall /* old vm86 syscall */
1069 .llong .compat_sys_wait4
1070 .llong .sys_swapoff /* 115 */
1071 .llong .sys32_sysinfo
1072 .llong .sys32_ipc
1073 .llong .sys_fsync
1074 .llong .ppc32_sigreturn
1075 .llong .ppc_clone /* 120 */
1076 .llong .sys32_setdomainname
1077 .llong .ppc64_newuname
1078 .llong .sys_ni_syscall /* old modify_ldt syscall */
1079 .llong .sys32_adjtimex
1080 .llong .sys_mprotect /* 125 */
1081 .llong .compat_sys_sigprocmask
1082 .llong .sys_ni_syscall /* old create_module syscall */
1083 .llong .sys_init_module
1084 .llong .sys_delete_module
1085 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1086 .llong .sys_quotactl
1087 .llong .sys32_getpgid
1088 .llong .sys_fchdir
1089 .llong .sys_bdflush
1090 .llong .sys32_sysfs /* 135 */
1091 .llong .ppc64_personality
1092 .llong .sys_ni_syscall /* for afs_syscall */
1093 .llong .sys_setfsuid
1094 .llong .sys_setfsgid
1095 .llong .sys_llseek /* 140 */
1096 .llong .sys32_getdents
1097 .llong .ppc32_select
1098 .llong .sys_flock
1099 .llong .sys_msync
1100 .llong .compat_sys_readv /* 145 */
1101 .llong .compat_sys_writev
1102 .llong .sys32_getsid
1103 .llong .sys_fdatasync
1104 .llong .sys32_sysctl
1105 .llong .sys_mlock /* 150 */
1106 .llong .sys_munlock
1107 .llong .sys_mlockall
1108 .llong .sys_munlockall
1109 .llong .sys32_sched_setparam
1110 .llong .sys32_sched_getparam /* 155 */
1111 .llong .sys32_sched_setscheduler
1112 .llong .sys32_sched_getscheduler
1113 .llong .sys_sched_yield
1114 .llong .sys32_sched_get_priority_max
1115 .llong .sys32_sched_get_priority_min /* 160 */
1116 .llong .sys32_sched_rr_get_interval
1117 .llong .compat_sys_nanosleep
1118 .llong .sys_mremap
1119 .llong .sys_setresuid
1120 .llong .sys_getresuid /* 165 */
1121 .llong .sys_ni_syscall /* old query_module syscall */
1122 .llong .sys_poll
1123 .llong .compat_sys_nfsservctl
1124 .llong .sys_setresgid
1125 .llong .sys_getresgid /* 170 */
1126 .llong .sys32_prctl
1127 .llong .ppc32_rt_sigreturn
1128 .llong .sys32_rt_sigaction
1129 .llong .sys32_rt_sigprocmask
1130 .llong .sys32_rt_sigpending /* 175 */
1131 .llong .compat_sys_rt_sigtimedwait
1132 .llong .sys32_rt_sigqueueinfo
1133 .llong .ppc32_rt_sigsuspend
1134 .llong .sys32_pread64
1135 .llong .sys32_pwrite64 /* 180 */
1136 .llong .sys_chown
1137 .llong .sys_getcwd
1138 .llong .sys_capget
1139 .llong .sys_capset
1140 .llong .sys32_sigaltstack /* 185 */
1141 .llong .sys32_sendfile
1142 .llong .sys_ni_syscall /* reserved for streams1 */
1143 .llong .sys_ni_syscall /* reserved for streams2 */
1144 .llong .ppc_vfork
1145 .llong .compat_sys_getrlimit /* 190 */
1146 .llong .sys32_readahead
1147 .llong .sys32_mmap2
1148 .llong .sys32_truncate64
1149 .llong .sys32_ftruncate64
1150 .llong .sys_stat64 /* 195 */
1151 .llong .sys_lstat64
1152 .llong .sys_fstat64
1153 .llong .sys32_pciconfig_read
1154 .llong .sys32_pciconfig_write
1155 .llong .sys32_pciconfig_iobase /* 200 - pciconfig_iobase */
1156 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1157 .llong .sys_getdents64
1158 .llong .sys_pivot_root
1159 .llong .compat_sys_fcntl64
1160 .llong .sys_madvise /* 205 */
1161 .llong .sys_mincore
1162 .llong .sys_gettid
1163 .llong .sys_tkill
1164 .llong .sys_setxattr
1165 .llong .sys_lsetxattr /* 210 */
1166 .llong .sys_fsetxattr
1167 .llong .sys_getxattr
1168 .llong .sys_lgetxattr
1169 .llong .sys_fgetxattr
1170 .llong .sys_listxattr /* 215 */
1171 .llong .sys_llistxattr
1172 .llong .sys_flistxattr
1173 .llong .sys_removexattr
1174 .llong .sys_lremovexattr
1175 .llong .sys_fremovexattr /* 220 */
1176 .llong .compat_sys_futex
1177 .llong .compat_sys_sched_setaffinity
1178 .llong .compat_sys_sched_getaffinity
1179 .llong .sys_ni_syscall
1180 .llong .sys_ni_syscall /* 225 - reserved for tux */
1181 .llong .sys32_sendfile64
1182 .llong .compat_sys_io_setup
1183 .llong .sys_io_destroy
1184 .llong .compat_sys_io_getevents
1185 .llong .compat_sys_io_submit
1186 .llong .sys_io_cancel
1187 .llong .sys_set_tid_address
1188 .llong .ppc32_fadvise64
1189 .llong .sys_exit_group
1190 .llong .ppc32_lookup_dcookie /* 235 */
1191 .llong .sys_epoll_create
1192 .llong .sys_epoll_ctl
1193 .llong .sys_epoll_wait
1194 .llong .sys_remap_file_pages
1195 .llong .ppc32_timer_create /* 240 */
1196 .llong .compat_sys_timer_settime
1197 .llong .compat_sys_timer_gettime
1198 .llong .sys_timer_getoverrun
1199 .llong .sys_timer_delete
1200 .llong .compat_sys_clock_settime /* 245 */
1201 .llong .compat_sys_clock_gettime
1202 .llong .compat_sys_clock_getres
1203 .llong .compat_sys_clock_nanosleep
1204 .llong .ppc32_swapcontext
1205 .llong .sys32_tgkill /* 250 */
1206 .llong .sys32_utimes
1207 .llong .compat_sys_statfs64
1208 .llong .compat_sys_fstatfs64
1209 .llong .ppc32_fadvise64_64 /* 32bit only fadvise64_64 */
1210 .llong .ppc_rtas /* 255 */
1211 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1212 .llong .sys_ni_syscall /* 257 reserved for vserver */
1213 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1214 .llong .compat_sys_mbind
1215 .llong .compat_sys_get_mempolicy /* 260 */
1216 .llong .compat_sys_set_mempolicy
1217 .llong .compat_sys_mq_open
1218 .llong .sys_mq_unlink
1219 .llong .compat_sys_mq_timedsend
1220 .llong .compat_sys_mq_timedreceive /* 265 */
1221 .llong .compat_sys_mq_notify
1222 .llong .compat_sys_mq_getsetattr
1223 .llong .compat_sys_kexec_load
1224 .llong .sys32_add_key
1225 .llong .sys32_request_key /* 270 */
1226 .llong .compat_sys_keyctl
1227 .llong .compat_sys_waitid
1228 .llong .sys32_ioprio_set
1229 .llong .sys32_ioprio_get
1230 .llong .sys_inotify_init /* 275 */
1231 .llong .sys_inotify_add_watch
1232 .llong .sys_inotify_rm_watch
1233
1234 .balign 8
1235_GLOBAL(sys_call_table)
1236 .llong .sys_restart_syscall /* 0 */
1237 .llong .sys_exit
1238 .llong .ppc_fork
1239 .llong .sys_read
1240 .llong .sys_write
1241 .llong .sys_open /* 5 */
1242 .llong .sys_close
1243 .llong .sys_waitpid
1244 .llong .sys_creat
1245 .llong .sys_link
1246 .llong .sys_unlink /* 10 */
1247 .llong .sys_execve
1248 .llong .sys_chdir
1249 .llong .sys64_time
1250 .llong .sys_mknod
1251 .llong .sys_chmod /* 15 */
1252 .llong .sys_lchown
1253 .llong .sys_ni_syscall /* old break syscall */
1254 .llong .sys_ni_syscall /* old stat syscall */
1255 .llong .sys_lseek
1256 .llong .sys_getpid /* 20 */
1257 .llong .sys_mount
1258 .llong .sys_ni_syscall /* old umount syscall */
1259 .llong .sys_setuid
1260 .llong .sys_getuid
1261 .llong .sys_stime /* 25 */
1262 .llong .sys_ptrace
1263 .llong .sys_alarm
1264 .llong .sys_ni_syscall /* old fstat syscall */
1265 .llong .sys_pause
1266 .llong .sys_utime /* 30 */
1267 .llong .sys_ni_syscall /* old stty syscall */
1268 .llong .sys_ni_syscall /* old gtty syscall */
1269 .llong .sys_access
1270 .llong .sys_nice
1271 .llong .sys_ni_syscall /* 35 - old ftime syscall */
1272 .llong .sys_sync
1273 .llong .sys_kill
1274 .llong .sys_rename
1275 .llong .sys_mkdir
1276 .llong .sys_rmdir /* 40 */
1277 .llong .sys_dup
1278 .llong .sys_pipe
1279 .llong .sys_times
1280 .llong .sys_ni_syscall /* old prof syscall */
1281 .llong .sys_brk /* 45 */
1282 .llong .sys_setgid
1283 .llong .sys_getgid
1284 .llong .sys_signal
1285 .llong .sys_geteuid
1286 .llong .sys_getegid /* 50 */
1287 .llong .sys_acct
1288 .llong .sys_umount
1289 .llong .sys_ni_syscall /* old lock syscall */
1290 .llong .sys_ioctl
1291 .llong .sys_fcntl /* 55 */
1292 .llong .sys_ni_syscall /* old mpx syscall */
1293 .llong .sys_setpgid
1294 .llong .sys_ni_syscall /* old ulimit syscall */
1295 .llong .sys_ni_syscall /* old uname syscall */
1296 .llong .sys_umask /* 60 */
1297 .llong .sys_chroot
1298 .llong .sys_ustat
1299 .llong .sys_dup2
1300 .llong .sys_getppid
1301 .llong .sys_getpgrp /* 65 */
1302 .llong .sys_setsid
1303 .llong .sys_ni_syscall
1304 .llong .sys_sgetmask
1305 .llong .sys_ssetmask
1306 .llong .sys_setreuid /* 70 */
1307 .llong .sys_setregid
1308 .llong .sys_ni_syscall
1309 .llong .sys_ni_syscall
1310 .llong .sys_sethostname
1311 .llong .sys_setrlimit /* 75 */
1312 .llong .sys_ni_syscall /* old getrlimit syscall */
1313 .llong .sys_getrusage
1314 .llong .sys_gettimeofday
1315 .llong .sys_settimeofday
1316 .llong .sys_getgroups /* 80 */
1317 .llong .sys_setgroups
1318 .llong .sys_ni_syscall /* old select syscall */
1319 .llong .sys_symlink
1320 .llong .sys_ni_syscall /* old lstat syscall */
1321 .llong .sys_readlink /* 85 */
1322 .llong .sys_uselib
1323 .llong .sys_swapon
1324 .llong .sys_reboot
1325 .llong .sys_ni_syscall /* old readdir syscall */
1326 .llong .sys_mmap /* 90 */
1327 .llong .sys_munmap
1328 .llong .sys_truncate
1329 .llong .sys_ftruncate
1330 .llong .sys_fchmod
1331 .llong .sys_fchown /* 95 */
1332 .llong .sys_getpriority
1333 .llong .sys_setpriority
1334 .llong .sys_ni_syscall /* old profil syscall holder */
1335 .llong .sys_statfs
1336 .llong .sys_fstatfs /* 100 */
1337 .llong .sys_ni_syscall /* old ioperm syscall */
1338 .llong .sys_socketcall
1339 .llong .sys_syslog
1340 .llong .sys_setitimer
1341 .llong .sys_getitimer /* 105 */
1342 .llong .sys_newstat
1343 .llong .sys_newlstat
1344 .llong .sys_newfstat
1345 .llong .sys_ni_syscall /* old uname syscall */
1346 .llong .sys_ni_syscall /* 110 old iopl syscall */
1347 .llong .sys_vhangup
1348 .llong .sys_ni_syscall /* old idle syscall */
1349 .llong .sys_ni_syscall /* old vm86 syscall */
1350 .llong .sys_wait4
1351 .llong .sys_swapoff /* 115 */
1352 .llong .sys_sysinfo
1353 .llong .sys_ipc
1354 .llong .sys_fsync
1355 .llong .sys_ni_syscall
1356 .llong .ppc_clone /* 120 */
1357 .llong .sys_setdomainname
1358 .llong .ppc64_newuname
1359 .llong .sys_ni_syscall /* old modify_ldt syscall */
1360 .llong .sys_adjtimex
1361 .llong .sys_mprotect /* 125 */
1362 .llong .sys_ni_syscall
1363 .llong .sys_ni_syscall /* old create_module syscall */
1364 .llong .sys_init_module
1365 .llong .sys_delete_module
1366 .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
1367 .llong .sys_quotactl
1368 .llong .sys_getpgid
1369 .llong .sys_fchdir
1370 .llong .sys_bdflush
1371 .llong .sys_sysfs /* 135 */
1372 .llong .ppc64_personality
1373 .llong .sys_ni_syscall /* for afs_syscall */
1374 .llong .sys_setfsuid
1375 .llong .sys_setfsgid
1376 .llong .sys_llseek /* 140 */
1377 .llong .sys_getdents
1378 .llong .sys_select
1379 .llong .sys_flock
1380 .llong .sys_msync
1381 .llong .sys_readv /* 145 */
1382 .llong .sys_writev
1383 .llong .sys_getsid
1384 .llong .sys_fdatasync
1385 .llong .sys_sysctl
1386 .llong .sys_mlock /* 150 */
1387 .llong .sys_munlock
1388 .llong .sys_mlockall
1389 .llong .sys_munlockall
1390 .llong .sys_sched_setparam
1391 .llong .sys_sched_getparam /* 155 */
1392 .llong .sys_sched_setscheduler
1393 .llong .sys_sched_getscheduler
1394 .llong .sys_sched_yield
1395 .llong .sys_sched_get_priority_max
1396 .llong .sys_sched_get_priority_min /* 160 */
1397 .llong .sys_sched_rr_get_interval
1398 .llong .sys_nanosleep
1399 .llong .sys_mremap
1400 .llong .sys_setresuid
1401 .llong .sys_getresuid /* 165 */
1402 .llong .sys_ni_syscall /* old query_module syscall */
1403 .llong .sys_poll
1404 .llong .sys_nfsservctl
1405 .llong .sys_setresgid
1406 .llong .sys_getresgid /* 170 */
1407 .llong .sys_prctl
1408 .llong .ppc64_rt_sigreturn
1409 .llong .sys_rt_sigaction
1410 .llong .sys_rt_sigprocmask
1411 .llong .sys_rt_sigpending /* 175 */
1412 .llong .sys_rt_sigtimedwait
1413 .llong .sys_rt_sigqueueinfo
1414 .llong .ppc64_rt_sigsuspend
1415 .llong .sys_pread64
1416 .llong .sys_pwrite64 /* 180 */
1417 .llong .sys_chown
1418 .llong .sys_getcwd
1419 .llong .sys_capget
1420 .llong .sys_capset
1421 .llong .sys_sigaltstack /* 185 */
1422 .llong .sys_sendfile64
1423 .llong .sys_ni_syscall /* reserved for streams1 */
1424 .llong .sys_ni_syscall /* reserved for streams2 */
1425 .llong .ppc_vfork
1426 .llong .sys_getrlimit /* 190 */
1427 .llong .sys_readahead
1428 .llong .sys_ni_syscall /* 32bit only mmap2 */
1429 .llong .sys_ni_syscall /* 32bit only truncate64 */
1430 .llong .sys_ni_syscall /* 32bit only ftruncate64 */
1431 .llong .sys_ni_syscall /* 195 - 32bit only stat64 */
1432 .llong .sys_ni_syscall /* 32bit only lstat64 */
1433 .llong .sys_ni_syscall /* 32bit only fstat64 */
1434 .llong .sys_pciconfig_read
1435 .llong .sys_pciconfig_write
1436 .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */
1437 .llong .sys_ni_syscall /* reserved for MacOnLinux */
1438 .llong .sys_getdents64
1439 .llong .sys_pivot_root
1440 .llong .sys_ni_syscall /* 32bit only fcntl64 */
1441 .llong .sys_madvise /* 205 */
1442 .llong .sys_mincore
1443 .llong .sys_gettid
1444 .llong .sys_tkill
1445 .llong .sys_setxattr
1446 .llong .sys_lsetxattr /* 210 */
1447 .llong .sys_fsetxattr
1448 .llong .sys_getxattr
1449 .llong .sys_lgetxattr
1450 .llong .sys_fgetxattr
1451 .llong .sys_listxattr /* 215 */
1452 .llong .sys_llistxattr
1453 .llong .sys_flistxattr
1454 .llong .sys_removexattr
1455 .llong .sys_lremovexattr
1456 .llong .sys_fremovexattr /* 220 */
1457 .llong .sys_futex
1458 .llong .sys_sched_setaffinity
1459 .llong .sys_sched_getaffinity
1460 .llong .sys_ni_syscall
1461 .llong .sys_ni_syscall /* 225 - reserved for tux */
1462 .llong .sys_ni_syscall /* 32bit only sendfile64 */
1463 .llong .sys_io_setup
1464 .llong .sys_io_destroy
1465 .llong .sys_io_getevents
1466 .llong .sys_io_submit /* 230 */
1467 .llong .sys_io_cancel
1468 .llong .sys_set_tid_address
1469 .llong .sys_fadvise64
1470 .llong .sys_exit_group
1471 .llong .sys_lookup_dcookie /* 235 */
1472 .llong .sys_epoll_create
1473 .llong .sys_epoll_ctl
1474 .llong .sys_epoll_wait
1475 .llong .sys_remap_file_pages
1476 .llong .sys_timer_create /* 240 */
1477 .llong .sys_timer_settime
1478 .llong .sys_timer_gettime
1479 .llong .sys_timer_getoverrun
1480 .llong .sys_timer_delete
1481 .llong .sys_clock_settime /* 245 */
1482 .llong .sys_clock_gettime
1483 .llong .sys_clock_getres
1484 .llong .sys_clock_nanosleep
1485 .llong .ppc64_swapcontext
1486 .llong .sys_tgkill /* 250 */
1487 .llong .sys_utimes
1488 .llong .sys_statfs64
1489 .llong .sys_fstatfs64
1490 .llong .sys_ni_syscall /* 32bit only fadvise64_64 */
1491 .llong .ppc_rtas /* 255 */
1492 .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
1493 .llong .sys_ni_syscall /* 257 reserved for vserver */
1494 .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
1495 .llong .sys_mbind
1496 .llong .sys_get_mempolicy /* 260 */
1497 .llong .sys_set_mempolicy
1498 .llong .sys_mq_open
1499 .llong .sys_mq_unlink
1500 .llong .sys_mq_timedsend
1501 .llong .sys_mq_timedreceive /* 265 */
1502 .llong .sys_mq_notify
1503 .llong .sys_mq_getsetattr
1504 .llong .sys_kexec_load
1505 .llong .sys_add_key
1506 .llong .sys_request_key /* 270 */
1507 .llong .sys_keyctl
1508 .llong .sys_waitid
1509 .llong .sys_ioprio_set
1510 .llong .sys_ioprio_get
1511 .llong .sys_inotify_init /* 275 */
1512 .llong .sys_inotify_add_watch
1513 .llong .sys_inotify_rm_watch
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
deleted file mode 100644
index ca78a7f10528..000000000000
--- a/arch/ppc64/kernel/mpic.h
+++ /dev/null
@@ -1,273 +0,0 @@
1#include <linux/irq.h>
2
3/*
4 * Global registers
5 */
6
7#define MPIC_GREG_BASE 0x01000
8
9#define MPIC_GREG_FEATURE_0 0x00000
10#define MPIC_GREG_FEATURE_LAST_SRC_MASK 0x07ff0000
11#define MPIC_GREG_FEATURE_LAST_SRC_SHIFT 16
12#define MPIC_GREG_FEATURE_LAST_CPU_MASK 0x00001f00
13#define MPIC_GREG_FEATURE_LAST_CPU_SHIFT 8
14#define MPIC_GREG_FEATURE_VERSION_MASK 0xff
15#define MPIC_GREG_FEATURE_1 0x00010
16#define MPIC_GREG_GLOBAL_CONF_0 0x00020
17#define MPIC_GREG_GCONF_RESET 0x80000000
18#define MPIC_GREG_GCONF_8259_PTHROU_DIS 0x20000000
19#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
20#define MPIC_GREG_GLOBAL_CONF_1 0x00030
21#define MPIC_GREG_VENDOR_0 0x00040
22#define MPIC_GREG_VENDOR_1 0x00050
23#define MPIC_GREG_VENDOR_2 0x00060
24#define MPIC_GREG_VENDOR_3 0x00070
25#define MPIC_GREG_VENDOR_ID 0x00080
26#define MPIC_GREG_VENDOR_ID_STEPPING_MASK 0x00ff0000
27#define MPIC_GREG_VENDOR_ID_STEPPING_SHIFT 16
28#define MPIC_GREG_VENDOR_ID_DEVICE_ID_MASK 0x0000ff00
29#define MPIC_GREG_VENDOR_ID_DEVICE_ID_SHIFT 8
30#define MPIC_GREG_VENDOR_ID_VENDOR_ID_MASK 0x000000ff
31#define MPIC_GREG_PROCESSOR_INIT 0x00090
32#define MPIC_GREG_IPI_VECTOR_PRI_0 0x000a0
33#define MPIC_GREG_IPI_VECTOR_PRI_1 0x000b0
34#define MPIC_GREG_IPI_VECTOR_PRI_2 0x000c0
35#define MPIC_GREG_IPI_VECTOR_PRI_3 0x000d0
36#define MPIC_GREG_SPURIOUS 0x000e0
37#define MPIC_GREG_TIMER_FREQ 0x000f0
38
39/*
40 *
41 * Timer registers
42 */
43#define MPIC_TIMER_BASE 0x01100
44#define MPIC_TIMER_STRIDE 0x40
45
46#define MPIC_TIMER_CURRENT_CNT 0x00000
47#define MPIC_TIMER_BASE_CNT 0x00010
48#define MPIC_TIMER_VECTOR_PRI 0x00020
49#define MPIC_TIMER_DESTINATION 0x00030
50
51/*
52 * Per-Processor registers
53 */
54
55#define MPIC_CPU_THISBASE 0x00000
56#define MPIC_CPU_BASE 0x20000
57#define MPIC_CPU_STRIDE 0x01000
58
59#define MPIC_CPU_IPI_DISPATCH_0 0x00040
60#define MPIC_CPU_IPI_DISPATCH_1 0x00050
61#define MPIC_CPU_IPI_DISPATCH_2 0x00060
62#define MPIC_CPU_IPI_DISPATCH_3 0x00070
63#define MPIC_CPU_CURRENT_TASK_PRI 0x00080
64#define MPIC_CPU_TASKPRI_MASK 0x0000000f
65#define MPIC_CPU_WHOAMI 0x00090
66#define MPIC_CPU_WHOAMI_MASK 0x0000001f
67#define MPIC_CPU_INTACK 0x000a0
68#define MPIC_CPU_EOI 0x000b0
69
70/*
71 * Per-source registers
72 */
73
74#define MPIC_IRQ_BASE 0x10000
75#define MPIC_IRQ_STRIDE 0x00020
76#define MPIC_IRQ_VECTOR_PRI 0x00000
77#define MPIC_VECPRI_MASK 0x80000000
78#define MPIC_VECPRI_ACTIVITY 0x40000000 /* Read Only */
79#define MPIC_VECPRI_PRIORITY_MASK 0x000f0000
80#define MPIC_VECPRI_PRIORITY_SHIFT 16
81#define MPIC_VECPRI_VECTOR_MASK 0x000007ff
82#define MPIC_VECPRI_POLARITY_POSITIVE 0x00800000
83#define MPIC_VECPRI_POLARITY_NEGATIVE 0x00000000
84#define MPIC_VECPRI_POLARITY_MASK 0x00800000
85#define MPIC_VECPRI_SENSE_LEVEL 0x00400000
86#define MPIC_VECPRI_SENSE_EDGE 0x00000000
87#define MPIC_VECPRI_SENSE_MASK 0x00400000
88#define MPIC_IRQ_DESTINATION 0x00010
89
90#define MPIC_MAX_IRQ_SOURCES 2048
91#define MPIC_MAX_CPUS 32
92#define MPIC_MAX_ISU 32
93
94/*
95 * Special vector numbers (internal use only)
96 */
97#define MPIC_VEC_SPURRIOUS 255
98#define MPIC_VEC_IPI_3 254
99#define MPIC_VEC_IPI_2 253
100#define MPIC_VEC_IPI_1 252
101#define MPIC_VEC_IPI_0 251
102
103/* unused */
104#define MPIC_VEC_TIMER_3 250
105#define MPIC_VEC_TIMER_2 249
106#define MPIC_VEC_TIMER_1 248
107#define MPIC_VEC_TIMER_0 247
108
109/* Type definition of the cascade handler */
110typedef int (*mpic_cascade_t)(struct pt_regs *regs, void *data);
111
112#ifdef CONFIG_MPIC_BROKEN_U3
113/* Fixup table entry */
114struct mpic_irq_fixup
115{
116 u8 __iomem *base;
117 unsigned int irq;
118};
119#endif /* CONFIG_MPIC_BROKEN_U3 */
120
121
122/* The instance data of a given MPIC */
123struct mpic
124{
125 /* The "linux" controller struct */
126 hw_irq_controller hc_irq;
127#ifdef CONFIG_SMP
128 hw_irq_controller hc_ipi;
129#endif
130 const char *name;
131 /* Flags */
132 unsigned int flags;
133 /* How many irq sources in a given ISU */
134 unsigned int isu_size;
135 unsigned int isu_shift;
136 unsigned int isu_mask;
137 /* Offset of irq vector numbers */
138 unsigned int irq_offset;
139 unsigned int irq_count;
140 /* Offset of ipi vector numbers */
141 unsigned int ipi_offset;
142 /* Number of sources */
143 unsigned int num_sources;
144 /* Number of CPUs */
145 unsigned int num_cpus;
146 /* cascade handler */
147 mpic_cascade_t cascade;
148 void *cascade_data;
149 unsigned int cascade_vec;
150 /* senses array */
151 unsigned char *senses;
152 unsigned int senses_count;
153
154#ifdef CONFIG_MPIC_BROKEN_U3
155 /* The fixup table */
156 struct mpic_irq_fixup *fixups;
157 spinlock_t fixup_lock;
158#endif
159
160 /* The various ioremap'ed bases */
161 volatile u32 __iomem *gregs;
162 volatile u32 __iomem *tmregs;
163 volatile u32 __iomem *cpuregs[MPIC_MAX_CPUS];
164 volatile u32 __iomem *isus[MPIC_MAX_ISU];
165
166 /* link */
167 struct mpic *next;
168};
169
170/* This is the primary controller, only that one has IPIs and
171 * has afinity control. A non-primary MPIC always uses CPU0
172 * registers only
173 */
174#define MPIC_PRIMARY 0x00000001
175/* Set this for a big-endian MPIC */
176#define MPIC_BIG_ENDIAN 0x00000002
177/* Broken U3 MPIC */
178#define MPIC_BROKEN_U3 0x00000004
179/* Broken IPI registers (autodetected) */
180#define MPIC_BROKEN_IPI 0x00000008
181/* MPIC wants a reset */
182#define MPIC_WANTS_RESET 0x00000010
183
184/* Allocate the controller structure and setup the linux irq descs
185 * for the range if interrupts passed in. No HW initialization is
186 * actually performed.
187 *
188 * @phys_addr: physial base address of the MPIC
189 * @flags: flags, see constants above
190 * @isu_size: number of interrupts in an ISU. Use 0 to use a
191 * standard ISU-less setup (aka powermac)
192 * @irq_offset: first irq number to assign to this mpic
193 * @irq_count: number of irqs to use with this mpic IRQ sources. Pass 0
194 * to match the number of sources
195 * @ipi_offset: first irq number to assign to this mpic IPI sources,
196 * used only on primary mpic
197 * @senses: array of sense values
198 * @senses_num: number of entries in the array
199 *
200 * Note about the sense array. If none is passed, all interrupts are
201 * setup to be level negative unless MPIC_BROKEN_U3 is set in which
202 * case they are edge positive (and the array is ignored anyway).
203 * The values in the array start at the first source of the MPIC,
204 * that is senses[0] correspond to linux irq "irq_offset".
205 */
206extern struct mpic *mpic_alloc(unsigned long phys_addr,
207 unsigned int flags,
208 unsigned int isu_size,
209 unsigned int irq_offset,
210 unsigned int irq_count,
211 unsigned int ipi_offset,
212 unsigned char *senses,
213 unsigned int senses_num,
214 const char *name);
215
216/* Assign ISUs, to call before mpic_init()
217 *
218 * @mpic: controller structure as returned by mpic_alloc()
219 * @isu_num: ISU number
220 * @phys_addr: physical address of the ISU
221 */
222extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
223 unsigned long phys_addr);
224
225/* Initialize the controller. After this has been called, none of the above
226 * should be called again for this mpic
227 */
228extern void mpic_init(struct mpic *mpic);
229
230/* Setup a cascade. Currently, only one cascade is supported this
231 * way, though you can always do a normal request_irq() and add
232 * other cascades this way. You should call this _after_ having
233 * added all the ISUs
234 *
235 * @irq_no: "linux" irq number of the cascade (that is offset'ed vector)
236 * @handler: cascade handler function
237 */
238extern void mpic_setup_cascade(unsigned int irq_no, mpic_cascade_t hanlder,
239 void *data);
240
241/*
242 * All of the following functions must only be used after the
243 * ISUs have been assigned and the controller fully initialized
244 * with mpic_init()
245 */
246
247
248/* Change/Read the priority of an interrupt. Default is 8 for irqs and
249 * 10 for IPIs. You can call this on both IPIs and IRQ numbers, but the
250 * IPI number is then the offset'ed (linux irq number mapped to the IPI)
251 */
252extern void mpic_irq_set_priority(unsigned int irq, unsigned int pri);
253extern unsigned int mpic_irq_get_priority(unsigned int irq);
254
255/* Setup a non-boot CPU */
256extern void mpic_setup_this_cpu(void);
257
258/* Clean up for kexec (or cpu offline or ...) */
259extern void mpic_teardown_this_cpu(int secondary);
260
261/* Request IPIs on primary mpic */
262extern void mpic_request_ipis(void);
263
264/* Send an IPI (non offseted number 0..3) */
265extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
266
267/* Fetch interrupt from a given mpic */
268extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs);
269/* This one gets to the primary mpic */
270extern int mpic_get_irq(struct pt_regs *regs);
271
272/* global mpic for pSeries */
273extern struct mpic *pSeries_mpic;
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1da69d5..3d2106b022a1 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34#include <asm/ppc-pci.h>
35#include "pci.h"
36 35
37#ifdef DEBUG 36#ifdef DEBUG
38#define DBG(fmt...) udbg_printf(fmt) 37#define DBG(fmt...) udbg_printf(fmt)
@@ -727,16 +726,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
727 * above routine 726 * above routine
728 */ 727 */
729pgprot_t pci_phys_mem_access_prot(struct file *file, 728pgprot_t pci_phys_mem_access_prot(struct file *file,
730 unsigned long offset, 729 unsigned long pfn,
731 unsigned long size, 730 unsigned long size,
732 pgprot_t protection) 731 pgprot_t protection)
733{ 732{
734 struct pci_dev *pdev = NULL; 733 struct pci_dev *pdev = NULL;
735 struct resource *found = NULL; 734 struct resource *found = NULL;
736 unsigned long prot = pgprot_val(protection); 735 unsigned long prot = pgprot_val(protection);
736 unsigned long offset = pfn << PAGE_SHIFT;
737 int i; 737 int i;
738 738
739 if (page_is_ram(offset >> PAGE_SHIFT)) 739 if (page_is_ram(pfn))
740 return __pgprot(prot); 740 return __pgprot(prot);
741 741
742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; 742 prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
@@ -881,9 +881,9 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
881} 881}
882 882
883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 883void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
884 struct device_node *dev) 884 struct device_node *dev, int prim)
885{ 885{
886 unsigned int *ranges; 886 unsigned int *ranges, pci_space;
887 unsigned long size; 887 unsigned long size;
888 int rlen = 0; 888 int rlen = 0;
889 int memno = 0; 889 int memno = 0;
@@ -906,16 +906,39 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen); 906 ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
907 while ((rlen -= np * sizeof(unsigned int)) >= 0) { 907 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
908 res = NULL; 908 res = NULL;
909 pci_addr = (unsigned long)ranges[1] << 32 | ranges[2]; 909 pci_space = ranges[0];
910 pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
910 911
911 cpu_phys_addr = ranges[3]; 912 cpu_phys_addr = ranges[3];
912 if (na == 2) 913 if (na >= 2)
913 cpu_phys_addr = cpu_phys_addr << 32 | ranges[4]; 914 cpu_phys_addr = (cpu_phys_addr << 32) | ranges[4];
914 915
915 size = (unsigned long)ranges[na+3] << 32 | ranges[na+4]; 916 size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
917 ranges += np;
916 if (size == 0) 918 if (size == 0)
917 continue; 919 continue;
918 switch ((ranges[0] >> 24) & 0x3) { 920
921 /* Now consume following elements while they are contiguous */
922 while (rlen >= np * sizeof(unsigned int)) {
923 unsigned long addr, phys;
924
925 if (ranges[0] != pci_space)
926 break;
927 addr = ((unsigned long)ranges[1] << 32) | ranges[2];
928 phys = ranges[3];
929 if (na >= 2)
930 phys = (phys << 32) | ranges[4];
931 if (addr != pci_addr + size ||
932 phys != cpu_phys_addr + size)
933 break;
934
935 size += ((unsigned long)ranges[na+3] << 32)
936 | ranges[na+4];
937 ranges += np;
938 rlen -= np * sizeof(unsigned int);
939 }
940
941 switch ((pci_space >> 24) & 0x3) {
919 case 1: /* I/O space */ 942 case 1: /* I/O space */
920 hose->io_base_phys = cpu_phys_addr; 943 hose->io_base_phys = cpu_phys_addr;
921 hose->pci_io_size = size; 944 hose->pci_io_size = size;
@@ -949,7 +972,6 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
949 res->sibling = NULL; 972 res->sibling = NULL;
950 res->child = NULL; 973 res->child = NULL;
951 } 974 }
952 ranges += np;
953 } 975 }
954} 976}
955 977
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
deleted file mode 100644
index 5eb2cc320566..000000000000
--- a/arch/ppc64/kernel/pci.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __PPC_KERNEL_PCI_H__
10#define __PPC_KERNEL_PCI_H__
11
12#include <linux/pci.h>
13#include <asm/pci-bridge.h>
14
15extern unsigned long isa_io_base;
16
17extern void pci_setup_pci_controller(struct pci_controller *hose);
18extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
19extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
20
21
22extern struct list_head hose_list;
23extern int global_phb_number;
24
25extern unsigned long find_and_init_phbs(void);
26
27extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */
28
29/* PCI device_node operations */
30struct device_node;
31typedef void *(*traverse_func)(struct device_node *me, void *data);
32void *traverse_pci_devices(struct device_node *start, traverse_func pre,
33 void *data);
34
35void pci_devs_phb_init(void);
36void pci_devs_phb_init_dynamic(struct pci_controller *phb);
37
38/* PCI address cache management routines */
39void pci_addr_cache_insert_device(struct pci_dev *dev);
40void pci_addr_cache_remove_device(struct pci_dev *dev);
41
42/* From rtas_pci.h */
43void init_pci_config_tokens (void);
44unsigned long get_phb_buid (struct device_node *);
45
46/* From pSeries_pci.h */
47extern void pSeries_final_fixup(void);
48extern void pSeries_irq_bus_setup(struct pci_bus *bus);
49
50extern unsigned long pci_probe_only;
51extern unsigned long pci_assign_all_buses;
52extern int pci_read_irq_line(struct pci_dev *pci_dev);
53
54#endif /* __PPC_KERNEL_PCI_H__ */
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index 54055c81017a..e1a32f802c0b 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h> 29#include <asm/abs_addr.h>
30 30#include <asm/ppc-pci.h>
31#include "pci.h"
32 31
33static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
34 dma_addr_t *dma_handle, gfp_t flag) 33 dma_addr_t *dma_handle, gfp_t flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d07d57..493bbe43f5b4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
30#include <asm/prom.h> 30#include <asm/prom.h>
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h> 32#include <asm/pSeries_reconfig.h>
33 33#include <asm/ppc-pci.h>
34#include "pci.h"
35 34
36/* 35/*
37 * Traverse_func that inits the PCI fields of the device node. 36 * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index d9e33b7d4203..bdf15dbbf4f0 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * arch/ppc64/kernel/pci_iommu.c 2 * arch/ppc64/kernel/pci_iommu.c
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation 3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4 * 4 *
5 * Rewrite, cleanup, new allocation schemes: 5 * Rewrite, cleanup, new allocation schemes:
6 * Copyright (C) 2004 Olof Johansson, IBM Corporation 6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
7 * 7 *
8 * Dynamic DMA mapping support, platform-independent parts. 8 * Dynamic DMA mapping support, platform-independent parts.
@@ -11,19 +11,18 @@
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version. 13 * (at your option) any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */ 23 */
24 24
25 25
26#include <linux/config.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
@@ -37,11 +36,7 @@
37#include <asm/iommu.h> 36#include <asm/iommu.h>
38#include <asm/pci-bridge.h> 37#include <asm/pci-bridge.h>
39#include <asm/machdep.h> 38#include <asm/machdep.h>
40#include "pci.h" 39#include <asm/ppc-pci.h>
41
42#ifdef CONFIG_PPC_ISERIES
43#include <asm/iSeries/iSeries_pci.h>
44#endif /* CONFIG_PPC_ISERIES */
45 40
46/* 41/*
47 * We can use ->sysdata directly and avoid the extra work in 42 * We can use ->sysdata directly and avoid the extra work in
@@ -61,13 +56,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
61 } else 56 } else
62 pdev = to_pci_dev(dev); 57 pdev = to_pci_dev(dev);
63 58
64#ifdef CONFIG_PPC_ISERIES
65 return ISERIES_DEVNODE(pdev)->iommu_table;
66#endif /* CONFIG_PPC_ISERIES */
67
68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 59 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 60}
72 61
73 62
diff --git a/arch/ppc64/kernel/pmac.h b/arch/ppc64/kernel/pmac.h
deleted file mode 100644
index 40e1c5030f74..000000000000
--- a/arch/ppc64/kernel/pmac.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef __PMAC_H__
2#define __PMAC_H__
3
4#include <linux/pci.h>
5#include <linux/ide.h>
6
7/*
8 * Declaration for the various functions exported by the
9 * pmac_* files. Mostly for use by pmac_setup
10 */
11
12extern void pmac_get_boot_time(struct rtc_time *tm);
13extern void pmac_get_rtc_time(struct rtc_time *tm);
14extern int pmac_set_rtc_time(struct rtc_time *tm);
15extern void pmac_read_rtc_time(void);
16extern void pmac_calibrate_decr(void);
17
18extern void pmac_pcibios_fixup(void);
19extern void pmac_pci_init(void);
20extern void pmac_setup_pci_dma(void);
21extern void pmac_check_ht_link(void);
22
23extern void pmac_setup_smp(void);
24
25extern unsigned long pmac_ide_get_base(int index);
26extern void pmac_ide_init_hwif_ports(hw_regs_t *hw,
27 unsigned long data_port, unsigned long ctrl_port, int *irq);
28
29extern void pmac_nvram_init(void);
30
31#endif /* __PMAC_H__ */
diff --git a/arch/ppc64/kernel/pmac_feature.c b/arch/ppc64/kernel/pmac_feature.c
deleted file mode 100644
index eb4e6c3f694d..000000000000
--- a/arch/ppc64/kernel/pmac_feature.c
+++ /dev/null
@@ -1,767 +0,0 @@
1/*
2 * arch/ppc/platforms/pmac_feature.c
3 *
4 * Copyright (C) 1996-2001 Paul Mackerras (paulus@cs.anu.edu.au)
5 * Ben. Herrenschmidt (benh@kernel.crashing.org)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * TODO:
13 *
14 * - Replace mdelay with some schedule loop if possible
15 * - Shorten some obfuscated delays on some routines (like modem
16 * power)
17 * - Refcount some clocks (see darwin)
18 * - Split split split...
19 *
20 */
21#include <linux/config.h>
22#include <linux/types.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/spinlock.h>
28#include <linux/adb.h>
29#include <linux/pmu.h>
30#include <linux/ioport.h>
31#include <linux/pci.h>
32#include <asm/sections.h>
33#include <asm/errno.h>
34#include <asm/keylargo.h>
35#include <asm/uninorth.h>
36#include <asm/io.h>
37#include <asm/prom.h>
38#include <asm/machdep.h>
39#include <asm/pmac_feature.h>
40#include <asm/dbdma.h>
41#include <asm/pci-bridge.h>
42#include <asm/pmac_low_i2c.h>
43
44#undef DEBUG_FEATURE
45
46#ifdef DEBUG_FEATURE
47#define DBG(fmt...) printk(KERN_DEBUG fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * We use a single global lock to protect accesses. Each driver has
54 * to take care of its own locking
55 */
56static DEFINE_SPINLOCK(feature_lock __pmacdata);
57
58#define LOCK(flags) spin_lock_irqsave(&feature_lock, flags);
59#define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags);
60
61
62/*
63 * Instance of some macio stuffs
64 */
65struct macio_chip macio_chips[MAX_MACIO_CHIPS] __pmacdata;
66
67struct macio_chip* __pmac macio_find(struct device_node* child, int type)
68{
69 while(child) {
70 int i;
71
72 for (i=0; i < MAX_MACIO_CHIPS && macio_chips[i].of_node; i++)
73 if (child == macio_chips[i].of_node &&
74 (!type || macio_chips[i].type == type))
75 return &macio_chips[i];
76 child = child->parent;
77 }
78 return NULL;
79}
80EXPORT_SYMBOL_GPL(macio_find);
81
/* Printable chip names, indexed by macio chip type (see macio_names[type]
 * use in probe_one_macio below). */
static const char* macio_names[] __pmacdata =
{
	"Unknown",
	"Grand Central",
	"OHare",
	"OHareII",
	"Heathrow",
	"Gatwick",
	"Paddington",
	"Keylargo",
	"Pangea",
	"Intrepid",
	"K2"
};
96
97
98
99/*
100 * Uninorth reg. access. Note that Uni-N regs are big endian
101 */
102
103#define UN_REG(r) (uninorth_base + ((r) >> 2))
104#define UN_IN(r) (in_be32(UN_REG(r)))
105#define UN_OUT(r,v) (out_be32(UN_REG(r), (v)))
106#define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v)))
107#define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v)))
108
109static struct device_node* uninorth_node __pmacdata;
110static u32* uninorth_base __pmacdata;
111static u32 uninorth_rev __pmacdata;
112static void *u3_ht;
113
114extern struct device_node *k2_skiplist[2];
115
116/*
117 * For each motherboard family, we have a table of functions pointers
118 * that handle the various features.
119 */
120
121typedef long (*feature_call)(struct device_node* node, long param, long value);
122
123struct feature_table_entry {
124 unsigned int selector;
125 feature_call function;
126};
127
128struct pmac_mb_def
129{
130 const char* model_string;
131 const char* model_name;
132 int model_id;
133 struct feature_table_entry* features;
134 unsigned long board_flags;
135};
136static struct pmac_mb_def pmac_mb __pmacdata;
137
138/*
139 * Here are the chip specific feature functions
140 */
141
142
143static long __pmac g5_read_gpio(struct device_node* node, long param, long value)
144{
145 struct macio_chip* macio = &macio_chips[0];
146
147 return MACIO_IN8(param);
148}
149
150
151static long __pmac g5_write_gpio(struct device_node* node, long param, long value)
152{
153 struct macio_chip* macio = &macio_chips[0];
154
155 MACIO_OUT8(param, (u8)(value & 0xff));
156 return 0;
157}
158
159static long __pmac g5_gmac_enable(struct device_node* node, long param, long value)
160{
161 struct macio_chip* macio = &macio_chips[0];
162 unsigned long flags;
163
164 if (node == NULL)
165 return -ENODEV;
166
167 LOCK(flags);
168 if (value) {
169 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
170 mb();
171 k2_skiplist[0] = NULL;
172 } else {
173 k2_skiplist[0] = node;
174 mb();
175 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_GMAC_CLK_ENABLE);
176 }
177
178 UNLOCK(flags);
179 mdelay(1);
180
181 return 0;
182}
183
184static long __pmac g5_fw_enable(struct device_node* node, long param, long value)
185{
186 struct macio_chip* macio = &macio_chips[0];
187 unsigned long flags;
188
189 if (node == NULL)
190 return -ENODEV;
191
192 LOCK(flags);
193 if (value) {
194 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
195 mb();
196 k2_skiplist[1] = NULL;
197 } else {
198 k2_skiplist[1] = node;
199 mb();
200 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_FW_CLK_ENABLE);
201 }
202
203 UNLOCK(flags);
204 mdelay(1);
205
206 return 0;
207}
208
209static long __pmac g5_mpic_enable(struct device_node* node, long param, long value)
210{
211 unsigned long flags;
212
213 if (node->parent == NULL || strcmp(node->parent->name, "u3"))
214 return 0;
215
216 LOCK(flags);
217 UN_BIS(U3_TOGGLE_REG, U3_MPIC_RESET | U3_MPIC_OUTPUT_ENABLE);
218 UNLOCK(flags);
219
220 return 0;
221}
222
223static long __pmac g5_eth_phy_reset(struct device_node* node, long param, long value)
224{
225 struct macio_chip* macio = &macio_chips[0];
226 struct device_node *phy;
227 int need_reset;
228
229 /*
230 * We must not reset the combo PHYs, only the BCM5221 found in
231 * the iMac G5.
232 */
233 phy = of_get_next_child(node, NULL);
234 if (!phy)
235 return -ENODEV;
236 need_reset = device_is_compatible(phy, "B5221");
237 of_node_put(phy);
238 if (!need_reset)
239 return 0;
240
241 /* PHY reset is GPIO 29, not in device-tree unfortunately */
242 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29,
243 KEYLARGO_GPIO_OUTPUT_ENABLE | KEYLARGO_GPIO_OUTOUT_DATA);
244 /* Thankfully, this is now always called at a time when we can
245 * schedule by sungem.
246 */
247 msleep(10);
248 MACIO_OUT8(K2_GPIO_EXTINT_0 + 29, 0);
249
250 return 0;
251}
252
253static long __pmac g5_i2s_enable(struct device_node *node, long param, long value)
254{
255 /* Very crude implementation for now */
256 struct macio_chip* macio = &macio_chips[0];
257 unsigned long flags;
258
259 if (value == 0)
260 return 0; /* don't disable yet */
261
262 LOCK(flags);
263 MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE |
264 KL3_I2S0_CLK18_ENABLE);
265 udelay(10);
266 MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE |
267 K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE);
268 udelay(10);
269 MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET);
270 UNLOCK(flags);
271 udelay(10);
272
273 return 0;
274}
275
276
#ifdef CONFIG_SMP
/*
 * Pulse the soft-reset GPIO of the CPU whose "reg" property equals
 * 'param'. Only implemented for K2 (macio_keylargo2) machines.
 */
static long __pmac g5_reset_cpu(struct device_node* node, long param, long value)
{
	unsigned int reset_io = 0;
	unsigned long flags;
	struct macio_chip* macio;
	struct device_node* cpus;
	struct device_node* cpu;

	macio = &macio_chips[0];
	if (macio->type != macio_keylargo2)
		return -ENODEV;

	/* Scan /cpus children for the matching CPU carrying a
	 * "soft-reset" GPIO property */
	cpus = find_path_device("/cpus");
	if (cpus == NULL)
		return -ENODEV;
	for (cpu = cpus->child; cpu != NULL; cpu = cpu->sibling) {
		u32* num = (u32 *)get_property(cpu, "reg", NULL);
		u32* rst = (u32 *)get_property(cpu, "soft-reset", NULL);

		if (num == NULL || rst == NULL)
			continue;
		if (param == *num) {
			reset_io = *rst;
			break;
		}
	}
	if (cpu == NULL || reset_io == 0)
		return -ENODEV;

	/* Assert then release the reset line, reading back to flush */
	LOCK(flags);
	MACIO_OUT8(reset_io, KEYLARGO_GPIO_OUTPUT_ENABLE);
	(void)MACIO_IN8(reset_io);
	udelay(1);
	MACIO_OUT8(reset_io, 0);
	(void)MACIO_IN8(reset_io);
	UNLOCK(flags);

	return 0;
}
#endif /* CONFIG_SMP */
316
317/*
318 * This can be called from pmac_smp so isn't static
319 *
320 * This takes the second CPU off the bus on dual CPU machines
321 * running UP
322 */
void __pmac g5_phy_disable_cpu1(void)
{
	/* Clearing the U3 API PHY config register takes CPU1 off the bus */
	UN_OUT(U3_API_PHY_CONFIG_1, 0);
}
327
328static long __pmac generic_get_mb_info(struct device_node* node, long param, long value)
329{
330 switch(param) {
331 case PMAC_MB_INFO_MODEL:
332 return pmac_mb.model_id;
333 case PMAC_MB_INFO_FLAGS:
334 return pmac_mb.board_flags;
335 case PMAC_MB_INFO_NAME:
336 /* hack hack hack... but should work */
337 *((const char **)value) = pmac_mb.model_name;
338 return 0;
339 }
340 return -EINVAL;
341}
342
343
344/*
345 * Table definitions
346 */
347
/* Used on any machine
 */
static struct feature_table_entry any_features[] __pmacdata = {
	{ PMAC_FTR_GET_MB_INFO,		generic_get_mb_info },
	{ 0, NULL }
};

/* G5 features
 */
static struct feature_table_entry g5_features[] __pmacdata = {
	{ PMAC_FTR_GMAC_ENABLE,		g5_gmac_enable },
	{ PMAC_FTR_1394_ENABLE,		g5_fw_enable },
	{ PMAC_FTR_ENABLE_MPIC,		g5_mpic_enable },
	{ PMAC_FTR_READ_GPIO,		g5_read_gpio },
	{ PMAC_FTR_WRITE_GPIO,		g5_write_gpio },
	{ PMAC_FTR_GMAC_PHY_RESET,	g5_eth_phy_reset },
	{ PMAC_FTR_SOUND_CHIP_ENABLE,	g5_i2s_enable },
#ifdef CONFIG_SMP
	{ PMAC_FTR_RESET_CPU,		g5_reset_cpu },
#endif /* CONFIG_SMP */
	{ 0, NULL }
};

/* Known motherboards: matched by probe_motherboard() first against the
 * device-tree "model" property, then via machine_is_compatible(). */
static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
	{ "PowerMac7,2", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac7,3", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac8,1", "iMac G5",
	  PMAC_TYPE_IMAC_G5, g5_features,
	  0,
	},
	{ "PowerMac9,1", "PowerMac G5",
	  PMAC_TYPE_POWERMAC_G5_U3L, g5_features,
	  0,
	},
	{ "RackMac3,1", "XServe G5",
	  PMAC_TYPE_XSERVE_G5, g5_features,
	  0,
	},
};
393
394/*
395 * The toplevel feature_call callback
396 */
397long __pmac pmac_do_feature_call(unsigned int selector, ...)
398{
399 struct device_node* node;
400 long param, value;
401 int i;
402 feature_call func = NULL;
403 va_list args;
404
405 if (pmac_mb.features)
406 for (i=0; pmac_mb.features[i].function; i++)
407 if (pmac_mb.features[i].selector == selector) {
408 func = pmac_mb.features[i].function;
409 break;
410 }
411 if (!func)
412 for (i=0; any_features[i].function; i++)
413 if (any_features[i].selector == selector) {
414 func = any_features[i].function;
415 break;
416 }
417 if (!func)
418 return -ENODEV;
419
420 va_start(args, selector);
421 node = (struct device_node*)va_arg(args, void*);
422 param = va_arg(args, long);
423 value = va_arg(args, long);
424 va_end(args);
425
426 return func(node, param, value);
427}
428
429static int __init probe_motherboard(void)
430{
431 int i;
432 struct macio_chip* macio = &macio_chips[0];
433 const char* model = NULL;
434 struct device_node *dt;
435
436 /* Lookup known motherboard type in device-tree. First try an
437 * exact match on the "model" property, then try a "compatible"
438 * match is none is found.
439 */
440 dt = find_devices("device-tree");
441 if (dt != NULL)
442 model = (const char *) get_property(dt, "model", NULL);
443 for(i=0; model && i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
444 if (strcmp(model, pmac_mb_defs[i].model_string) == 0) {
445 pmac_mb = pmac_mb_defs[i];
446 goto found;
447 }
448 }
449 for(i=0; i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) {
450 if (machine_is_compatible(pmac_mb_defs[i].model_string)) {
451 pmac_mb = pmac_mb_defs[i];
452 goto found;
453 }
454 }
455
456 /* Fallback to selection depending on mac-io chip type */
457 switch(macio->type) {
458 case macio_keylargo2:
459 pmac_mb.model_id = PMAC_TYPE_UNKNOWN_K2;
460 pmac_mb.model_name = "Unknown K2-based";
461 pmac_mb.features = g5_features;
462
463 default:
464 return -ENODEV;
465 }
466found:
467 /* Check for "mobile" machine */
468 if (model && (strncmp(model, "PowerBook", 9) == 0
469 || strncmp(model, "iBook", 5) == 0))
470 pmac_mb.board_flags |= PMAC_MB_MOBILE;
471
472
473 printk(KERN_INFO "PowerMac motherboard: %s\n", pmac_mb.model_name);
474 return 0;
475}
476
477/* Initialize the Core99 UniNorth host bridge and memory controller
478 */
479static void __init probe_uninorth(void)
480{
481 uninorth_node = of_find_node_by_name(NULL, "u3");
482 if (uninorth_node && uninorth_node->n_addrs > 0) {
483 /* Small hack until I figure out if parsing in prom.c is correct. I should
484 * get rid of those pre-parsed junk anyway
485 */
486 unsigned long address = uninorth_node->addrs[0].address;
487 uninorth_base = ioremap(address, 0x40000);
488 uninorth_rev = in_be32(UN_REG(UNI_N_VERSION));
489 u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000);
490 } else
491 uninorth_node = NULL;
492
493 if (!uninorth_node)
494 return;
495
496 printk(KERN_INFO "Found U3 memory controller & host bridge, revision: %d\n",
497 uninorth_rev);
498 printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base);
499
500}
501
502static void __init probe_one_macio(const char* name, const char* compat, int type)
503{
504 struct device_node* node;
505 int i;
506 volatile u32* base;
507 u32* revp;
508
509 node = find_devices(name);
510 if (!node || !node->n_addrs)
511 return;
512 if (compat)
513 do {
514 if (device_is_compatible(node, compat))
515 break;
516 node = node->next;
517 } while (node);
518 if (!node)
519 return;
520 for(i=0; i<MAX_MACIO_CHIPS; i++) {
521 if (!macio_chips[i].of_node)
522 break;
523 if (macio_chips[i].of_node == node)
524 return;
525 }
526 if (i >= MAX_MACIO_CHIPS) {
527 printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
528 printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name);
529 return;
530 }
531 base = (volatile u32*)ioremap(node->addrs[0].address, node->addrs[0].size);
532 if (!base) {
533 printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n");
534 return;
535 }
536 if (type == macio_keylargo) {
537 u32* did = (u32 *)get_property(node, "device-id", NULL);
538 if (*did == 0x00000025)
539 type = macio_pangea;
540 if (*did == 0x0000003e)
541 type = macio_intrepid;
542 }
543 macio_chips[i].of_node = node;
544 macio_chips[i].type = type;
545 macio_chips[i].base = base;
546 macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON;
547 macio_chips[i].name = macio_names[type];
548 revp = (u32 *)get_property(node, "revision-id", NULL);
549 if (revp)
550 macio_chips[i].rev = *revp;
551 printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
552 macio_names[type], macio_chips[i].rev, macio_chips[i].base);
553}
554
555static int __init
556probe_macios(void)
557{
558 probe_one_macio("mac-io", "K2-Keylargo", macio_keylargo2);
559
560 macio_chips[0].lbus.index = 0;
561 macio_chips[1].lbus.index = 1;
562
563 return (macio_chips[0].of_node == NULL) ? -ENODEV : 0;
564}
565
566static void __init
567set_initial_features(void)
568{
569 struct device_node *np;
570
571 if (macio_chips[0].type == macio_keylargo2) {
572#ifndef CONFIG_SMP
573 /* On SMP machines running UP, we have the second CPU eating
574 * bus cycles. We need to take it off the bus. This is done
575 * from pmac_smp for SMP kernels running on one CPU
576 */
577 np = of_find_node_by_type(NULL, "cpu");
578 if (np != NULL)
579 np = of_find_node_by_type(np, "cpu");
580 if (np != NULL) {
581 g5_phy_disable_cpu1();
582 of_node_put(np);
583 }
584#endif /* CONFIG_SMP */
585 /* Enable GMAC for now for PCI probing. It will be disabled
586 * later on after PCI probe
587 */
588 np = of_find_node_by_name(NULL, "ethernet");
589 while(np) {
590 if (device_is_compatible(np, "K2-GMAC"))
591 g5_gmac_enable(np, 0, 1);
592 np = of_find_node_by_name(np, "ethernet");
593 }
594
595 /* Enable FW before PCI probe. Will be disabled later on
596 * Note: We should have a batter way to check that we are
597 * dealing with uninorth internal cell and not a PCI cell
598 * on the external PCI. The code below works though.
599 */
600 np = of_find_node_by_name(NULL, "firewire");
601 while(np) {
602 if (device_is_compatible(np, "pci106b,5811")) {
603 macio_chips[0].flags |= MACIO_FLAG_FW_SUPPORTED;
604 g5_fw_enable(np, 0, 1);
605 }
606 np = of_find_node_by_name(np, "firewire");
607 }
608 }
609}
610
/* Main entry point: probe bridges and mac-io, set up low-level i2c,
 * identify the board and apply initial feature state. Call order below
 * matters (motherboard probing relies on mac-io type). */
void __init
pmac_feature_init(void)
{
	/* Detect the UniNorth memory controller */
	probe_uninorth();

	/* Probe mac-io controllers */
	if (probe_macios()) {
		printk(KERN_WARNING "No mac-io chip found\n");
		return;
	}

	/* Setup low-level i2c stuffs */
	pmac_init_low_i2c();

	/* Probe machine type */
	if (probe_motherboard())
		printk(KERN_WARNING "Unknown PowerMac !\n");

	/* Set some initial features (turn off some chips that will
	 * be later turned on)
	 */
	set_initial_features();
}
635
/* Late (device_initcall) hook. The OF resource reservation below is
 * currently compiled out, so this is effectively a no-op returning 0. */
int __init pmac_feature_late_init(void)
{
#if 0
	struct device_node* np;

	/* Request some resources late */
	if (uninorth_node)
		request_OF_resource(uninorth_node, 0, NULL);
	np = find_devices("hammerhead");
	if (np)
		request_OF_resource(np, 0, NULL);
	np = find_devices("interrupt-controller");
	if (np)
		request_OF_resource(np, 0, NULL);
#endif
	return 0;
}

device_initcall(pmac_feature_late_init);
655
#if 0
/* Pretty-print a HyperTransport link's frequency and in/out widths from
 * its config/frequency registers. Dead code (compiled out), kept for
 * debugging pmac_check_ht_link(). */
static void dump_HT_speeds(char *name, u32 cfg, u32 frq)
{
	int freqs[16] = { 200,300,400,500,600,800,1000,0,0,0,0,0,0,0,0,0 };
	int bits[8] = { 8,16,0,32,2,4,0,0 };
	int freq = (frq >> 8) & 0xf;

	if (freqs[freq] == 0)
		printk("%s: Unknown HT link frequency %x\n", name, freq);
	else
		printk("%s: %d MHz on main link, (%d in / %d out) bits width\n",
		       name, freqs[freq],
		       bits[(cfg >> 28) & 0x7], bits[(cfg >> 24) & 0x7]);
}
#endif
671
/* Would dump U3 and PCI-X HyperTransport link speeds at boot; the whole
 * body is currently compiled out, so this is a no-op. */
void __init pmac_check_ht_link(void)
{
#if 0 /* Disabled for now */
	u32 ufreq, freq, ucfg, cfg;
	struct device_node *pcix_node;
	struct pci_dn *pdn;
	u8 px_bus, px_devfn;
	struct pci_controller *px_hose;

	(void)in_be32(u3_ht + U3_HT_LINK_COMMAND);
	ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG);
	ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ);
	dump_HT_speeds("U3 HyperTransport", cfg, freq);

	pcix_node = of_find_compatible_node(NULL, "pci", "pci-x");
	if (pcix_node == NULL) {
		printk("No PCI-X bridge found\n");
		return;
	}
	pdn = pcix_node->data;
	px_hose = pdn->phb;
	px_bus = pdn->busno;
	px_devfn = pdn->devfn;

	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc4, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xcc, &freq);
	dump_HT_speeds("PCI-X HT Uplink", cfg, freq);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xc8, &cfg);
	early_read_config_dword(px_hose, px_bus, px_devfn, 0xd0, &freq);
	dump_HT_speeds("PCI-X HT Downlink", cfg, freq);
#endif
}
704
705/*
706 * Early video resume hook
707 */
708
709static void (*pmac_early_vresume_proc)(void *data) __pmacdata;
710static void *pmac_early_vresume_data __pmacdata;
711
712void pmac_set_early_video_resume(void (*proc)(void *data), void *data)
713{
714 if (_machine != _MACH_Pmac)
715 return;
716 preempt_disable();
717 pmac_early_vresume_proc = proc;
718 pmac_early_vresume_data = data;
719 preempt_enable();
720}
721EXPORT_SYMBOL(pmac_set_early_video_resume);
722
723
724/*
725 * AGP related suspend/resume code
726 */
727
728static struct pci_dev *pmac_agp_bridge __pmacdata;
729static int (*pmac_agp_suspend)(struct pci_dev *bridge) __pmacdata;
730static int (*pmac_agp_resume)(struct pci_dev *bridge) __pmacdata;
731
732void __pmac pmac_register_agp_pm(struct pci_dev *bridge,
733 int (*suspend)(struct pci_dev *bridge),
734 int (*resume)(struct pci_dev *bridge))
735{
736 if (suspend || resume) {
737 pmac_agp_bridge = bridge;
738 pmac_agp_suspend = suspend;
739 pmac_agp_resume = resume;
740 return;
741 }
742 if (bridge != pmac_agp_bridge)
743 return;
744 pmac_agp_suspend = pmac_agp_resume = NULL;
745 return;
746}
747EXPORT_SYMBOL(pmac_register_agp_pm);
748
749void __pmac pmac_suspend_agp_for_card(struct pci_dev *dev)
750{
751 if (pmac_agp_bridge == NULL || pmac_agp_suspend == NULL)
752 return;
753 if (pmac_agp_bridge->bus != dev->bus)
754 return;
755 pmac_agp_suspend(pmac_agp_bridge);
756}
757EXPORT_SYMBOL(pmac_suspend_agp_for_card);
758
759void __pmac pmac_resume_agp_for_card(struct pci_dev *dev)
760{
761 if (pmac_agp_bridge == NULL || pmac_agp_resume == NULL)
762 return;
763 if (pmac_agp_bridge->bus != dev->bus)
764 return;
765 pmac_agp_resume(pmac_agp_bridge);
766}
767EXPORT_SYMBOL(pmac_resume_agp_for_card);
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
deleted file mode 100644
index dc40a0cad0b4..000000000000
--- a/arch/ppc64/kernel/pmac_pci.c
+++ /dev/null
@@ -1,793 +0,0 @@
1/*
2 * Support for PCI bridges found on Power Macintoshes.
3 * At present the "bandit" and "chaos" bridges are supported.
4 * Fortunately you access configuration space in the same
5 * way with either bridge.
6 *
7 * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org)
8 * Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/kernel.h>
17#include <linux/pci.h>
18#include <linux/delay.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/bootmem.h>
22
23#include <asm/sections.h>
24#include <asm/io.h>
25#include <asm/prom.h>
26#include <asm/pci-bridge.h>
27#include <asm/machdep.h>
28#include <asm/pmac_feature.h>
29#include <asm/iommu.h>
30
31#include "pci.h"
32#include "pmac.h"
33
34#define DEBUG
35
36#ifdef DEBUG
37#define DBG(x...) printk(x)
38#else
39#define DBG(x...)
40#endif
41
42/* XXX Could be per-controller, but I don't think we risk anything by
43 * assuming we won't have both UniNorth and Bandit */
44static int has_uninorth;
45static struct pci_controller *u3_agp;
46struct device_node *k2_skiplist[2];
47
48static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
49{
50 for (; node != 0;node = node->sibling) {
51 int * bus_range;
52 unsigned int *class_code;
53 int len;
54
55 /* For PCI<->PCI bridges or CardBus bridges, we go down */
56 class_code = (unsigned int *) get_property(node, "class-code", NULL);
57 if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
58 (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
59 continue;
60 bus_range = (int *) get_property(node, "bus-range", &len);
61 if (bus_range != NULL && len > 2 * sizeof(int)) {
62 if (bus_range[1] > higher)
63 higher = bus_range[1];
64 }
65 higher = fixup_one_level_bus_range(node->child, higher);
66 }
67 return higher;
68}
69
70/* This routine fixes the "bus-range" property of all bridges in the
71 * system since they tend to have their "last" member wrong on macs
72 *
73 * Note that the bus numbers manipulated here are OF bus numbers, they
74 * are not Linux bus numbers.
75 */
76static void __init fixup_bus_range(struct device_node *bridge)
77{
78 int * bus_range;
79 int len;
80
81 /* Lookup the "bus-range" property for the hose */
82 bus_range = (int *) get_property(bridge, "bus-range", &len);
83 if (bus_range == NULL || len < 2 * sizeof(int)) {
84 printk(KERN_WARNING "Can't get bus-range for %s\n",
85 bridge->full_name);
86 return;
87 }
88 bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
89}
90
91/*
92 * Apple MacRISC (U3, UniNorth, Bandit, Chaos) PCI controllers.
93 *
94 * The "Bandit" version is present in all early PCI PowerMacs,
95 * and up to the first ones using Grackle. Some machines may
96 * have 2 bandit controllers (2 PCI busses).
97 *
98 * "Chaos" is used in some "Bandit"-type machines as a bridge
99 * for the separate display bus. It is accessed the same
100 * way as bandit, but cannot be probed for devices. It therefore
101 * has its own config access functions.
102 *
103 * The "UniNorth" version is present in all Core99 machines
104 * (iBook, G4, new IMacs, and all the recent Apple machines).
105 * It contains 3 controllers in one ASIC.
106 *
107 * The U3 is the bridge used on G5 machines. It contains on
108 * AGP bus which is dealt with the old UniNorth access routines
109 * and an HyperTransport bus which uses its own set of access
110 * functions.
111 */
112
113#define MACRISC_CFA0(devfn, off) \
114 ((1 << (unsigned long)PCI_SLOT(dev_fn)) \
115 | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \
116 | (((unsigned long)(off)) & 0xFCUL))
117
118#define MACRISC_CFA1(bus, devfn, off) \
119 ((((unsigned long)(bus)) << 16) \
120 |(((unsigned long)(devfn)) << 8) \
121 |(((unsigned long)(off)) & 0xFCUL) \
122 |1UL)
123
124static unsigned long __pmac macrisc_cfg_access(struct pci_controller* hose,
125 u8 bus, u8 dev_fn, u8 offset)
126{
127 unsigned int caddr;
128
129 if (bus == hose->first_busno) {
130 if (dev_fn < (11 << 3))
131 return 0;
132 caddr = MACRISC_CFA0(dev_fn, offset);
133 } else
134 caddr = MACRISC_CFA1(bus, dev_fn, offset);
135
136 /* Uninorth will return garbage if we don't read back the value ! */
137 do {
138 out_le32(hose->cfg_addr, caddr);
139 } while (in_le32(hose->cfg_addr) != caddr);
140
141 offset &= has_uninorth ? 0x07 : 0x03;
142 return ((unsigned long)hose->cfg_data) + offset;
143}
144
145static int __pmac macrisc_read_config(struct pci_bus *bus, unsigned int devfn,
146 int offset, int len, u32 *val)
147{
148 struct pci_controller *hose;
149 unsigned long addr;
150
151 hose = pci_bus_to_host(bus);
152 if (hose == NULL)
153 return PCIBIOS_DEVICE_NOT_FOUND;
154
155 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
156 if (!addr)
157 return PCIBIOS_DEVICE_NOT_FOUND;
158 /*
159 * Note: the caller has already checked that offset is
160 * suitably aligned and that len is 1, 2 or 4.
161 */
162 switch (len) {
163 case 1:
164 *val = in_8((u8 *)addr);
165 break;
166 case 2:
167 *val = in_le16((u16 *)addr);
168 break;
169 default:
170 *val = in_le32((u32 *)addr);
171 break;
172 }
173 return PCIBIOS_SUCCESSFUL;
174}
175
176static int __pmac macrisc_write_config(struct pci_bus *bus, unsigned int devfn,
177 int offset, int len, u32 val)
178{
179 struct pci_controller *hose;
180 unsigned long addr;
181
182 hose = pci_bus_to_host(bus);
183 if (hose == NULL)
184 return PCIBIOS_DEVICE_NOT_FOUND;
185
186 addr = macrisc_cfg_access(hose, bus->number, devfn, offset);
187 if (!addr)
188 return PCIBIOS_DEVICE_NOT_FOUND;
189 /*
190 * Note: the caller has already checked that offset is
191 * suitably aligned and that len is 1, 2 or 4.
192 */
193 switch (len) {
194 case 1:
195 out_8((u8 *)addr, val);
196 (void) in_8((u8 *)addr);
197 break;
198 case 2:
199 out_le16((u16 *)addr, val);
200 (void) in_le16((u16 *)addr);
201 break;
202 default:
203 out_le32((u32 *)addr, val);
204 (void) in_le32((u32 *)addr);
205 break;
206 }
207 return PCIBIOS_SUCCESSFUL;
208}
209
210static struct pci_ops macrisc_pci_ops =
211{
212 macrisc_read_config,
213 macrisc_write_config
214};
215
216/*
217 * These versions of U3 HyperTransport config space access ops do not
218 * implement self-view of the HT host yet
219 */
220
221/*
222 * This function deals with some "special cases" devices.
223 *
224 * 0 -> No special case
225 * 1 -> Skip the device but act as if the access was successfull
226 * (return 0xff's on reads, eventually, cache config space
227 * accesses in a later version)
228 * -1 -> Hide the device (unsuccessful acess)
229 */
230static int u3_ht_skip_device(struct pci_controller *hose,
231 struct pci_bus *bus, unsigned int devfn)
232{
233 struct device_node *busdn, *dn;
234 int i;
235
236 /* We only allow config cycles to devices that are in OF device-tree
237 * as we are apparently having some weird things going on with some
238 * revs of K2 on recent G5s
239 */
240 if (bus->self)
241 busdn = pci_device_to_OF_node(bus->self);
242 else
243 busdn = hose->arch_data;
244 for (dn = busdn->child; dn; dn = dn->sibling)
245 if (dn->data && PCI_DN(dn)->devfn == devfn)
246 break;
247 if (dn == NULL)
248 return -1;
249
250 /*
251 * When a device in K2 is powered down, we die on config
252 * cycle accesses. Fix that here.
253 */
254 for (i=0; i<2; i++)
255 if (k2_skiplist[i] == dn)
256 return 1;
257
258 return 0;
259}
260
261#define U3_HT_CFA0(devfn, off) \
262 ((((unsigned long)devfn) << 8) | offset)
263#define U3_HT_CFA1(bus, devfn, off) \
264 (U3_HT_CFA0(devfn, off) \
265 + (((unsigned long)bus) << 16) \
266 + 0x01000000UL)
267
268static unsigned long __pmac u3_ht_cfg_access(struct pci_controller* hose,
269 u8 bus, u8 devfn, u8 offset)
270{
271 if (bus == hose->first_busno) {
272 /* For now, we don't self probe U3 HT bridge */
273 if (PCI_SLOT(devfn) == 0)
274 return 0;
275 return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset);
276 } else
277 return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset);
278}
279
280static int __pmac u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
281 int offset, int len, u32 *val)
282{
283 struct pci_controller *hose;
284 unsigned long addr;
285
286
287 hose = pci_bus_to_host(bus);
288 if (hose == NULL)
289 return PCIBIOS_DEVICE_NOT_FOUND;
290
291 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
292 if (!addr)
293 return PCIBIOS_DEVICE_NOT_FOUND;
294
295 switch (u3_ht_skip_device(hose, bus, devfn)) {
296 case 0:
297 break;
298 case 1:
299 switch (len) {
300 case 1:
301 *val = 0xff; break;
302 case 2:
303 *val = 0xffff; break;
304 default:
305 *val = 0xfffffffful; break;
306 }
307 return PCIBIOS_SUCCESSFUL;
308 default:
309 return PCIBIOS_DEVICE_NOT_FOUND;
310 }
311
312 /*
313 * Note: the caller has already checked that offset is
314 * suitably aligned and that len is 1, 2 or 4.
315 */
316 switch (len) {
317 case 1:
318 *val = in_8((u8 *)addr);
319 break;
320 case 2:
321 *val = in_le16((u16 *)addr);
322 break;
323 default:
324 *val = in_le32((u32 *)addr);
325 break;
326 }
327 return PCIBIOS_SUCCESSFUL;
328}
329
330static int __pmac u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
331 int offset, int len, u32 val)
332{
333 struct pci_controller *hose;
334 unsigned long addr;
335
336 hose = pci_bus_to_host(bus);
337 if (hose == NULL)
338 return PCIBIOS_DEVICE_NOT_FOUND;
339
340 addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
341 if (!addr)
342 return PCIBIOS_DEVICE_NOT_FOUND;
343
344 switch (u3_ht_skip_device(hose, bus, devfn)) {
345 case 0:
346 break;
347 case 1:
348 return PCIBIOS_SUCCESSFUL;
349 default:
350 return PCIBIOS_DEVICE_NOT_FOUND;
351 }
352
353 /*
354 * Note: the caller has already checked that offset is
355 * suitably aligned and that len is 1, 2 or 4.
356 */
357 switch (len) {
358 case 1:
359 out_8((u8 *)addr, val);
360 (void) in_8((u8 *)addr);
361 break;
362 case 2:
363 out_le16((u16 *)addr, val);
364 (void) in_le16((u16 *)addr);
365 break;
366 default:
367 out_le32((u32 *)addr, val);
368 (void) in_le32((u32 *)addr);
369 break;
370 }
371 return PCIBIOS_SUCCESSFUL;
372}
373
374static struct pci_ops u3_ht_pci_ops =
375{
376 u3_ht_read_config,
377 u3_ht_write_config
378};
379
380static void __init setup_u3_agp(struct pci_controller* hose)
381{
382 /* On G5, we move AGP up to high bus number so we don't need
383 * to reassign bus numbers for HT. If we ever have P2P bridges
384 * on AGP, we'll have to move pci_assign_all_busses to the
385 * pci_controller structure so we enable it for AGP and not for
386 * HT childs.
387 * We hard code the address because of the different size of
388 * the reg address cell, we shall fix that by killing struct
389 * reg_property and using some accessor functions instead
390 */
391 hose->first_busno = 0xf0;
392 hose->last_busno = 0xff;
393 has_uninorth = 1;
394 hose->ops = &macrisc_pci_ops;
395 hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
396 hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
397
398 u3_agp = hose;
399}
400
401static void __init setup_u3_ht(struct pci_controller* hose)
402{
403 struct device_node *np = (struct device_node *)hose->arch_data;
404 int i, cur;
405
406 hose->ops = &u3_ht_pci_ops;
407
408 /* We hard code the address because of the different size of
409 * the reg address cell, we shall fix that by killing struct
410 * reg_property and using some accessor functions instead
411 */
412 hose->cfg_data = (volatile unsigned char *)ioremap(0xf2000000, 0x02000000);
413
414 /*
415 * /ht node doesn't expose a "ranges" property, so we "remove" regions that
416 * have been allocated to AGP. So far, this version of the code doesn't assign
417 * any of the 0xfxxxxxxx "fine" memory regions to /ht.
418 * We need to fix that sooner or later by either parsing all child "ranges"
419 * properties or figuring out the U3 address space decoding logic and
420 * then read it's configuration register (if any).
421 */
422 hose->io_base_phys = 0xf4000000;
423 hose->io_base_virt = ioremap(hose->io_base_phys, 0x00400000);
424 isa_io_base = pci_io_base = (unsigned long) hose->io_base_virt;
425 hose->io_resource.name = np->full_name;
426 hose->io_resource.start = 0;
427 hose->io_resource.end = 0x003fffff;
428 hose->io_resource.flags = IORESOURCE_IO;
429 hose->pci_mem_offset = 0;
430 hose->first_busno = 0;
431 hose->last_busno = 0xef;
432 hose->mem_resources[0].name = np->full_name;
433 hose->mem_resources[0].start = 0x80000000;
434 hose->mem_resources[0].end = 0xefffffff;
435 hose->mem_resources[0].flags = IORESOURCE_MEM;
436
437 if (u3_agp == NULL) {
438 DBG("U3 has no AGP, using full resource range\n");
439 return;
440 }
441
442 /* We "remove" the AGP resources from the resources allocated to HT, that
443 * is we create "holes". However, that code does assumptions that so far
444 * happen to be true (cross fingers...), typically that resources in the
445 * AGP node are properly ordered
446 */
447 cur = 0;
448 for (i=0; i<3; i++) {
449 struct resource *res = &u3_agp->mem_resources[i];
450 if (res->flags != IORESOURCE_MEM)
451 continue;
452 /* We don't care about "fine" resources */
453 if (res->start >= 0xf0000000)
454 continue;
455 /* Check if it's just a matter of "shrinking" us in one direction */
456 if (hose->mem_resources[cur].start == res->start) {
457 DBG("U3/HT: shrink start of %d, %08lx -> %08lx\n",
458 cur, hose->mem_resources[cur].start, res->end + 1);
459 hose->mem_resources[cur].start = res->end + 1;
460 continue;
461 }
462 if (hose->mem_resources[cur].end == res->end) {
463 DBG("U3/HT: shrink end of %d, %08lx -> %08lx\n",
464 cur, hose->mem_resources[cur].end, res->start - 1);
465 hose->mem_resources[cur].end = res->start - 1;
466 continue;
467 }
468 /* No, it's not the case, we need a hole */
469 if (cur == 2) {
470 /* not enough resources for a hole, we drop part of the range */
471 printk(KERN_WARNING "Running out of resources for /ht host !\n");
472 hose->mem_resources[cur].end = res->start - 1;
473 continue;
474 }
475 cur++;
476 DBG("U3/HT: hole, %d end at %08lx, %d start at %08lx\n",
477 cur-1, res->start - 1, cur, res->end + 1);
478 hose->mem_resources[cur].name = np->full_name;
479 hose->mem_resources[cur].flags = IORESOURCE_MEM;
480 hose->mem_resources[cur].start = res->end + 1;
481 hose->mem_resources[cur].end = hose->mem_resources[cur-1].end;
482 hose->mem_resources[cur-1].end = res->start - 1;
483 }
484}
485
486static void __init pmac_process_bridge_OF_ranges(struct pci_controller *hose,
487 struct device_node *dev, int primary)
488{
489 static unsigned int static_lc_ranges[2024];
490 unsigned int *dt_ranges, *lc_ranges, *ranges, *prev;
491 unsigned int size;
492 int rlen = 0, orig_rlen;
493 int memno = 0;
494 struct resource *res;
495 int np, na = prom_n_addr_cells(dev);
496
497 np = na + 5;
498
499 /* First we try to merge ranges to fix a problem with some pmacs
500 * that can have more than 3 ranges, fortunately using contiguous
501 * addresses -- BenH
502 */
503 dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
504 if (!dt_ranges)
505 return;
506 /* lc_ranges = alloc_bootmem(rlen);*/
507 lc_ranges = static_lc_ranges;
508 if (!lc_ranges)
509 return; /* what can we do here ? */
510 memcpy(lc_ranges, dt_ranges, rlen);
511 orig_rlen = rlen;
512
513 /* Let's work on a copy of the "ranges" property instead of damaging
514 * the device-tree image in memory
515 */
516 ranges = lc_ranges;
517 prev = NULL;
518 while ((rlen -= np * sizeof(unsigned int)) >= 0) {
519 if (prev) {
520 if (prev[0] == ranges[0] && prev[1] == ranges[1] &&
521 (prev[2] + prev[na+4]) == ranges[2] &&
522 (prev[na+2] + prev[na+4]) == ranges[na+2]) {
523 prev[na+4] += ranges[na+4];
524 ranges[0] = 0;
525 ranges += np;
526 continue;
527 }
528 }
529 prev = ranges;
530 ranges += np;
531 }
532
533 /*
534 * The ranges property is laid out as an array of elements,
535 * each of which comprises:
536 * cells 0 - 2: a PCI address
537 * cells 3 or 3+4: a CPU physical address
538 * (size depending on dev->n_addr_cells)
539 * cells 4+5 or 5+6: the size of the range
540 */
541 ranges = lc_ranges;
542 rlen = orig_rlen;
543 while (ranges && (rlen -= np * sizeof(unsigned int)) >= 0) {
544 res = NULL;
545 size = ranges[na+4];
546 switch (ranges[0] >> 24) {
547 case 1: /* I/O space */
548 if (ranges[2] != 0)
549 break;
550 hose->io_base_phys = ranges[na+2];
551 /* limit I/O space to 16MB */
552 if (size > 0x01000000)
553 size = 0x01000000;
554 hose->io_base_virt = ioremap(ranges[na+2], size);
555 if (primary)
556 isa_io_base = (unsigned long) hose->io_base_virt;
557 res = &hose->io_resource;
558 res->flags = IORESOURCE_IO;
559 res->start = ranges[2];
560 break;
561 case 2: /* memory space */
562 memno = 0;
563 if (ranges[1] == 0 && ranges[2] == 0
564 && ranges[na+4] <= (16 << 20)) {
565 /* 1st 16MB, i.e. ISA memory area */
566#if 0
567 if (primary)
568 isa_mem_base = ranges[na+2];
569#endif
570 memno = 1;
571 }
572 while (memno < 3 && hose->mem_resources[memno].flags)
573 ++memno;
574 if (memno == 0)
575 hose->pci_mem_offset = ranges[na+2] - ranges[2];
576 if (memno < 3) {
577 res = &hose->mem_resources[memno];
578 res->flags = IORESOURCE_MEM;
579 res->start = ranges[na+2];
580 }
581 break;
582 }
583 if (res != NULL) {
584 res->name = dev->full_name;
585 res->end = res->start + size - 1;
586 res->parent = NULL;
587 res->sibling = NULL;
588 res->child = NULL;
589 }
590 ranges += np;
591 }
592}
593
594/*
595 * We assume that if we have a G3 powermac, we have one bridge called
596 * "pci" (a MPC106) and no bandit or chaos bridges, and contrariwise,
597 * if we have one or more bandit or chaos bridges, we don't have a MPC106.
598 */
599static int __init add_bridge(struct device_node *dev)
600{
601 int len;
602 struct pci_controller *hose;
603 char* disp_name;
604 int *bus_range;
605 int primary = 1;
606 struct property *of_prop;
607
608 DBG("Adding PCI host bridge %s\n", dev->full_name);
609
610 bus_range = (int *) get_property(dev, "bus-range", &len);
611 if (bus_range == NULL || len < 2 * sizeof(int)) {
612 printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n",
613 dev->full_name);
614 }
615
616 hose = alloc_bootmem(sizeof(struct pci_controller));
617 if (hose == NULL)
618 return -ENOMEM;
619 pci_setup_pci_controller(hose);
620
621 hose->arch_data = dev;
622 hose->first_busno = bus_range ? bus_range[0] : 0;
623 hose->last_busno = bus_range ? bus_range[1] : 0xff;
624
625 of_prop = alloc_bootmem(sizeof(struct property) +
626 sizeof(hose->global_number));
627 if (of_prop) {
628 memset(of_prop, 0, sizeof(struct property));
629 of_prop->name = "linux,pci-domain";
630 of_prop->length = sizeof(hose->global_number);
631 of_prop->value = (unsigned char *)&of_prop[1];
632 memcpy(of_prop->value, &hose->global_number, sizeof(hose->global_number));
633 prom_add_property(dev, of_prop);
634 }
635
636 disp_name = NULL;
637 if (device_is_compatible(dev, "u3-agp")) {
638 setup_u3_agp(hose);
639 disp_name = "U3-AGP";
640 primary = 0;
641 } else if (device_is_compatible(dev, "u3-ht")) {
642 setup_u3_ht(hose);
643 disp_name = "U3-HT";
644 primary = 1;
645 }
646 printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
647 disp_name, hose->first_busno, hose->last_busno);
648
649 /* Interpret the "ranges" property */
650 /* This also maps the I/O region and sets isa_io/mem_base */
651 pmac_process_bridge_OF_ranges(hose, dev, primary);
652
653 /* Fixup "bus-range" OF property */
654 fixup_bus_range(dev);
655
656 return 0;
657}
658
659/*
660 * We use our own read_irq_line here because PCI_INTERRUPT_PIN is
661 * crap on some of Apple ASICs. We unconditionally use the Open Firmware
662 * interrupt number as this is always right.
663 */
664static int pmac_pci_read_irq_line(struct pci_dev *pci_dev)
665{
666 struct device_node *node;
667
668 node = pci_device_to_OF_node(pci_dev);
669 if (node == NULL)
670 return -1;
671 if (node->n_intrs == 0)
672 return -1;
673 pci_dev->irq = node->intrs[0].line;
674 pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);
675
676 return 0;
677}
678
679void __init pmac_pcibios_fixup(void)
680{
681 struct pci_dev *dev = NULL;
682
683 for_each_pci_dev(dev)
684 pmac_pci_read_irq_line(dev);
685}
686
687static void __init pmac_fixup_phb_resources(void)
688{
689 struct pci_controller *hose, *tmp;
690
691 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
692 unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
693 hose->io_resource.start += offset;
694 hose->io_resource.end += offset;
695 printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n",
696 hose->global_number,
697 hose->io_resource.start, hose->io_resource.end);
698 }
699}
700
701void __init pmac_pci_init(void)
702{
703 struct device_node *np, *root;
704 struct device_node *ht = NULL;
705
706 /* Probe root PCI hosts, that is on U3 the AGP host and the
707 * HyperTransport host. That one is actually "kept" around
708 * and actually added last as it's resource management relies
709 * on the AGP resources to have been setup first
710 */
711 root = of_find_node_by_path("/");
712 if (root == NULL) {
713 printk(KERN_CRIT "pmac_find_bridges: can't find root of device tree\n");
714 return;
715 }
716 for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
717 if (np->name == NULL)
718 continue;
719 if (strcmp(np->name, "pci") == 0) {
720 if (add_bridge(np) == 0)
721 of_node_get(np);
722 }
723 if (strcmp(np->name, "ht") == 0) {
724 of_node_get(np);
725 ht = np;
726 }
727 }
728 of_node_put(root);
729
730 /* Now setup the HyperTransport host if we found any
731 */
732 if (ht && add_bridge(ht) != 0)
733 of_node_put(ht);
734
735 /* Fixup the IO resources on our host bridges as the common code
736 * does it only for childs of the host bridges
737 */
738 pmac_fixup_phb_resources();
739
740 /* Setup the linkage between OF nodes and PHBs */
741 pci_devs_phb_init();
742
743 /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
744 * assume there is no P2P bridge on the AGP bus, which should be a
745 * safe assumptions hopefully.
746 */
747 if (u3_agp) {
748 struct device_node *np = u3_agp->arch_data;
749 PCI_DN(np)->busno = 0xf0;
750 for (np = np->child; np; np = np->sibling)
751 PCI_DN(np)->busno = 0xf0;
752 }
753
754 pmac_check_ht_link();
755
756 /* Tell pci.c to not use the common resource allocation mecanism */
757 pci_probe_only = 1;
758
759 /* Allow all IO */
760 io_page_mask = -1;
761}
762
763/*
764 * Disable second function on K2-SATA, it's broken
765 * and disable IO BARs on first one
766 */
767static void fixup_k2_sata(struct pci_dev* dev)
768{
769 int i;
770 u16 cmd;
771
772 if (PCI_FUNC(dev->devfn) > 0) {
773 pci_read_config_word(dev, PCI_COMMAND, &cmd);
774 cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
775 pci_write_config_word(dev, PCI_COMMAND, cmd);
776 for (i = 0; i < 6; i++) {
777 dev->resource[i].start = dev->resource[i].end = 0;
778 dev->resource[i].flags = 0;
779 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
780 }
781 } else {
782 pci_read_config_word(dev, PCI_COMMAND, &cmd);
783 cmd &= ~PCI_COMMAND_IO;
784 pci_write_config_word(dev, PCI_COMMAND, cmd);
785 for (i = 0; i < 5; i++) {
786 dev->resource[i].start = dev->resource[i].end = 0;
787 dev->resource[i].flags = 0;
788 pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0);
789 }
790 }
791}
792DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
793
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
deleted file mode 100644
index fa8121d53b89..000000000000
--- a/arch/ppc64/kernel/pmac_setup.c
+++ /dev/null
@@ -1,525 +0,0 @@
1/*
2 * arch/ppc/platforms/setup.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Adapted for Power Macintosh by Paul Mackerras
8 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
9 *
10 * Derived from "arch/alpha/kernel/setup.c"
11 * Copyright (C) 1995 Linus Torvalds
12 *
13 * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22/*
23 * bootup setup stuff..
24 */
25
26#undef DEBUG
27
28#include <linux/config.h>
29#include <linux/init.h>
30#include <linux/errno.h>
31#include <linux/sched.h>
32#include <linux/kernel.h>
33#include <linux/mm.h>
34#include <linux/stddef.h>
35#include <linux/unistd.h>
36#include <linux/ptrace.h>
37#include <linux/slab.h>
38#include <linux/user.h>
39#include <linux/a.out.h>
40#include <linux/tty.h>
41#include <linux/string.h>
42#include <linux/delay.h>
43#include <linux/ioport.h>
44#include <linux/major.h>
45#include <linux/initrd.h>
46#include <linux/vt_kern.h>
47#include <linux/console.h>
48#include <linux/ide.h>
49#include <linux/pci.h>
50#include <linux/adb.h>
51#include <linux/cuda.h>
52#include <linux/pmu.h>
53#include <linux/irq.h>
54#include <linux/seq_file.h>
55#include <linux/root_dev.h>
56#include <linux/bitops.h>
57
58#include <asm/processor.h>
59#include <asm/sections.h>
60#include <asm/prom.h>
61#include <asm/system.h>
62#include <asm/io.h>
63#include <asm/pci-bridge.h>
64#include <asm/iommu.h>
65#include <asm/machdep.h>
66#include <asm/dma.h>
67#include <asm/btext.h>
68#include <asm/cputable.h>
69#include <asm/pmac_feature.h>
70#include <asm/time.h>
71#include <asm/of_device.h>
72#include <asm/lmb.h>
73#include <asm/smu.h>
74#include <asm/pmc.h>
75
76#include "pmac.h"
77#include "mpic.h"
78
79#ifdef DEBUG
80#define DBG(fmt...) udbg_printf(fmt)
81#else
82#define DBG(fmt...)
83#endif
84
85static int current_root_goodness = -1;
86#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
87
88extern int powersave_nap;
89int sccdbg;
90
91sys_ctrler_t sys_ctrler;
92EXPORT_SYMBOL(sys_ctrler);
93
94#ifdef CONFIG_PMAC_SMU
95unsigned long smu_cmdbuf_abs;
96EXPORT_SYMBOL(smu_cmdbuf_abs);
97#endif
98
99extern void udbg_init_scc(struct device_node *np);
100
101static void __pmac pmac_show_cpuinfo(struct seq_file *m)
102{
103 struct device_node *np;
104 char *pp;
105 int plen;
106 char* mbname;
107 int mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
108 PMAC_MB_INFO_MODEL, 0);
109 unsigned int mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
110 PMAC_MB_INFO_FLAGS, 0);
111
112 if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
113 (long)&mbname) != 0)
114 mbname = "Unknown";
115
116 /* find motherboard type */
117 seq_printf(m, "machine\t\t: ");
118 np = of_find_node_by_path("/");
119 if (np != NULL) {
120 pp = (char *) get_property(np, "model", NULL);
121 if (pp != NULL)
122 seq_printf(m, "%s\n", pp);
123 else
124 seq_printf(m, "PowerMac\n");
125 pp = (char *) get_property(np, "compatible", &plen);
126 if (pp != NULL) {
127 seq_printf(m, "motherboard\t:");
128 while (plen > 0) {
129 int l = strlen(pp) + 1;
130 seq_printf(m, " %s", pp);
131 plen -= l;
132 pp += l;
133 }
134 seq_printf(m, "\n");
135 }
136 of_node_put(np);
137 } else
138 seq_printf(m, "PowerMac\n");
139
140 /* print parsed model */
141 seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
142 seq_printf(m, "pmac flags\t: %08x\n", mbflags);
143
144 /* Indicate newworld */
145 seq_printf(m, "pmac-generation\t: NewWorld\n");
146}
147
148
149static void __init pmac_setup_arch(void)
150{
151 /* init to some ~sane value until calibrate_delay() runs */
152 loops_per_jiffy = 50000000;
153
154 /* Probe motherboard chipset */
155 pmac_feature_init();
156#if 0
157 /* Lock-enable the SCC channel used for debug */
158 if (sccdbg) {
159 np = of_find_node_by_name(NULL, "escc");
160 if (np)
161 pmac_call_feature(PMAC_FTR_SCC_ENABLE, np,
162 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
163 }
164#endif
165 /* We can NAP */
166 powersave_nap = 1;
167
168#ifdef CONFIG_ADB_PMU
169 /* Initialize the PMU if any */
170 find_via_pmu();
171#endif
172#ifdef CONFIG_PMAC_SMU
173 /* Initialize the SMU if any */
174 smu_init();
175#endif
176
177 /* Init NVRAM access */
178 pmac_nvram_init();
179
180 /* Setup SMP callback */
181#ifdef CONFIG_SMP
182 pmac_setup_smp();
183#endif
184
185 /* Lookup PCI hosts */
186 pmac_pci_init();
187
188#ifdef CONFIG_DUMMY_CONSOLE
189 conswitchp = &dummy_con;
190#endif
191
192 printk(KERN_INFO "Using native/NAP idle loop\n");
193}
194
195#ifdef CONFIG_SCSI
196void note_scsi_host(struct device_node *node, void *host)
197{
198 /* Obsolete */
199}
200#endif
201
202
203static int initializing = 1;
204
205static int pmac_late_init(void)
206{
207 initializing = 0;
208 return 0;
209}
210
211late_initcall(pmac_late_init);
212
213/* can't be __init - can be called whenever a disk is first accessed */
214void __pmac note_bootable_part(dev_t dev, int part, int goodness)
215{
216 extern dev_t boot_dev;
217 char *p;
218
219 if (!initializing)
220 return;
221 if ((goodness <= current_root_goodness) &&
222 ROOT_DEV != DEFAULT_ROOT_DEVICE)
223 return;
224 p = strstr(saved_command_line, "root=");
225 if (p != NULL && (p == saved_command_line || p[-1] == ' '))
226 return;
227
228 if (!boot_dev || dev == boot_dev) {
229 ROOT_DEV = dev + part;
230 boot_dev = 0;
231 current_root_goodness = goodness;
232 }
233}
234
235static void __pmac pmac_restart(char *cmd)
236{
237 switch(sys_ctrler) {
238#ifdef CONFIG_ADB_PMU
239 case SYS_CTRLER_PMU:
240 pmu_restart();
241 break;
242#endif
243
244#ifdef CONFIG_PMAC_SMU
245 case SYS_CTRLER_SMU:
246 smu_restart();
247 break;
248#endif
249 default:
250 ;
251 }
252}
253
254static void __pmac pmac_power_off(void)
255{
256 switch(sys_ctrler) {
257#ifdef CONFIG_ADB_PMU
258 case SYS_CTRLER_PMU:
259 pmu_shutdown();
260 break;
261#endif
262#ifdef CONFIG_PMAC_SMU
263 case SYS_CTRLER_SMU:
264 smu_shutdown();
265 break;
266#endif
267 default:
268 ;
269 }
270}
271
272static void __pmac pmac_halt(void)
273{
274 pmac_power_off();
275}
276
277#ifdef CONFIG_BOOTX_TEXT
278static void btext_putc(unsigned char c)
279{
280 btext_drawchar(c);
281}
282
283static void __init init_boot_display(void)
284{
285 char *name;
286 struct device_node *np = NULL;
287 int rc = -ENODEV;
288
289 printk("trying to initialize btext ...\n");
290
291 name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
292 if (name != NULL) {
293 np = of_find_node_by_path(name);
294 if (np != NULL) {
295 if (strcmp(np->type, "display") != 0) {
296 printk("boot stdout isn't a display !\n");
297 of_node_put(np);
298 np = NULL;
299 }
300 }
301 }
302 if (np)
303 rc = btext_initialize(np);
304 if (rc == 0)
305 return;
306
307 for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
308 if (get_property(np, "linux,opened", NULL)) {
309 printk("trying %s ...\n", np->full_name);
310 rc = btext_initialize(np);
311 printk("result: %d\n", rc);
312 }
313 if (rc == 0)
314 return;
315 }
316}
317#endif /* CONFIG_BOOTX_TEXT */
318
319/*
320 * Early initialization.
321 */
322static void __init pmac_init_early(void)
323{
324 DBG(" -> pmac_init_early\n");
325
326 /* Initialize hash table, from now on, we can take hash faults
327 * and call ioremap
328 */
329 hpte_init_native();
330
331 /* Init SCC */
332 if (strstr(cmd_line, "sccdbg")) {
333 sccdbg = 1;
334 udbg_init_scc(NULL);
335 }
336#ifdef CONFIG_BOOTX_TEXT
337 else {
338 init_boot_display();
339
340 udbg_putc = btext_putc;
341 }
342#endif /* CONFIG_BOOTX_TEXT */
343
344 /* Setup interrupt mapping options */
345 ppc64_interrupt_controller = IC_OPEN_PIC;
346
347 iommu_init_early_u3();
348
349 DBG(" <- pmac_init_early\n");
350}
351
352static int pmac_u3_cascade(struct pt_regs *regs, void *data)
353{
354 return mpic_get_one_irq((struct mpic *)data, regs);
355}
356
357static __init void pmac_init_IRQ(void)
358{
359 struct device_node *irqctrler = NULL;
360 struct device_node *irqctrler2 = NULL;
361 struct device_node *np = NULL;
362 struct mpic *mpic1, *mpic2;
363
364 /* We first try to detect Apple's new Core99 chipset, since mac-io
365 * is quite different on those machines and contains an IBM MPIC2.
366 */
367 while ((np = of_find_node_by_type(np, "open-pic")) != NULL) {
368 struct device_node *parent = of_get_parent(np);
369 if (parent && !strcmp(parent->name, "u3"))
370 irqctrler2 = of_node_get(np);
371 else
372 irqctrler = of_node_get(np);
373 of_node_put(parent);
374 }
375 if (irqctrler != NULL && irqctrler->n_addrs > 0) {
376 unsigned char senses[128];
377
378 printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n",
379 (unsigned int)irqctrler->addrs[0].address);
380
381 prom_get_irq_senses(senses, 0, 128);
382 mpic1 = mpic_alloc(irqctrler->addrs[0].address,
383 MPIC_PRIMARY | MPIC_WANTS_RESET,
384 0, 0, 128, 256, senses, 128, " K2-MPIC ");
385 BUG_ON(mpic1 == NULL);
386 mpic_init(mpic1);
387
388 if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 &&
389 irqctrler2->n_addrs > 0) {
390 printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n",
391 (u32)irqctrler2->addrs[0].address,
392 irqctrler2->intrs[0].line);
393
394 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0);
395 prom_get_irq_senses(senses, 128, 128 + 128);
396
397 /* We don't need to set MPIC_BROKEN_U3 here since we don't have
398 * hypertransport interrupts routed to it
399 */
400 mpic2 = mpic_alloc(irqctrler2->addrs[0].address,
401 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET,
402 0, 128, 128, 0, senses, 128, " U3-MPIC ");
403 BUG_ON(mpic2 == NULL);
404 mpic_init(mpic2);
405 mpic_setup_cascade(irqctrler2->intrs[0].line,
406 pmac_u3_cascade, mpic2);
407 }
408 }
409 of_node_put(irqctrler);
410 of_node_put(irqctrler2);
411}
412
413static void __init pmac_progress(char *s, unsigned short hex)
414{
415 if (sccdbg) {
416 udbg_puts(s);
417 udbg_puts("\n");
418 }
419#ifdef CONFIG_BOOTX_TEXT
420 else if (boot_text_mapped) {
421 btext_drawstring(s);
422 btext_drawstring("\n");
423 }
424#endif /* CONFIG_BOOTX_TEXT */
425}
426
427/*
428 * pmac has no legacy IO, anything calling this function has to
429 * fail or bad things will happen
430 */
431static int pmac_check_legacy_ioport(unsigned int baseport)
432{
433 return -ENODEV;
434}
435
436static int __init pmac_declare_of_platform_devices(void)
437{
438 struct device_node *np, *npp;
439
440 npp = of_find_node_by_name(NULL, "u3");
441 if (npp) {
442 for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) {
443 if (strncmp(np->name, "i2c", 3) == 0) {
444 of_platform_device_create(np, "u3-i2c", NULL);
445 of_node_put(np);
446 break;
447 }
448 }
449 of_node_put(npp);
450 }
451 npp = of_find_node_by_type(NULL, "smu");
452 if (npp) {
453 of_platform_device_create(npp, "smu", NULL);
454 of_node_put(npp);
455 }
456
457 return 0;
458}
459
460device_initcall(pmac_declare_of_platform_devices);
461
462/*
463 * Called very early, MMU is off, device-tree isn't unflattened
464 */
465static int __init pmac_probe(int platform)
466{
467 if (platform != PLATFORM_POWERMAC)
468 return 0;
469 /*
470 * On U3, the DART (iommu) must be allocated now since it
471 * has an impact on htab_initialize (due to the large page it
472 * occupies having to be broken up so the DART itself is not
473 * part of the cacheable linar mapping
474 */
475 alloc_u3_dart_table();
476
477#ifdef CONFIG_PMAC_SMU
478 /*
479 * SMU based G5s need some memory below 2Gb, at least the current
480 * driver needs that. We have to allocate it now. We allocate 4k
481 * (1 small page) for now.
482 */
483 smu_cmdbuf_abs = lmb_alloc_base(4096, 4096, 0x80000000UL);
484#endif /* CONFIG_PMAC_SMU */
485
486 return 1;
487}
488
489static int pmac_probe_mode(struct pci_bus *bus)
490{
491 struct device_node *node = bus->sysdata;
492
493 /* We need to use normal PCI probing for the AGP bus,
494 since the device for the AGP bridge isn't in the tree. */
495 if (bus->self == NULL && device_is_compatible(node, "u3-agp"))
496 return PCI_PROBE_NORMAL;
497
498 return PCI_PROBE_DEVTREE;
499}
500
501struct machdep_calls __initdata pmac_md = {
502#ifdef CONFIG_HOTPLUG_CPU
503 .cpu_die = generic_mach_cpu_die,
504#endif
505 .probe = pmac_probe,
506 .setup_arch = pmac_setup_arch,
507 .init_early = pmac_init_early,
508 .get_cpuinfo = pmac_show_cpuinfo,
509 .init_IRQ = pmac_init_IRQ,
510 .get_irq = mpic_get_irq,
511 .pcibios_fixup = pmac_pcibios_fixup,
512 .pci_probe_mode = pmac_probe_mode,
513 .restart = pmac_restart,
514 .power_off = pmac_power_off,
515 .halt = pmac_halt,
516 .get_boot_time = pmac_get_boot_time,
517 .set_rtc_time = pmac_set_rtc_time,
518 .get_rtc_time = pmac_get_rtc_time,
519 .calibrate_decr = pmac_calibrate_decr,
520 .feature_call = pmac_do_feature_call,
521 .progress = pmac_progress,
522 .check_legacy_ioport = pmac_check_legacy_ioport,
523 .idle_loop = native_idle,
524 .enable_pmcs = power4_enable_pmcs,
525};
diff --git a/arch/ppc64/kernel/pmac_smp.c b/arch/ppc64/kernel/pmac_smp.c
deleted file mode 100644
index a23de37227bf..000000000000
--- a/arch/ppc64/kernel/pmac_smp.c
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * SMP support for power macintosh.
3 *
4 * We support both the old "powersurge" SMP architecture
5 * and the current Core99 (G4 PowerMac) machines.
6 *
7 * Note that we don't support the very first rev. of
8 * Apple/DayStar 2 CPUs board, the one with the funky
9 * watchdog. Hopefully, none of these should be there except
10 * maybe internally to Apple. I should probably still add some
11 * code to detect this card though and disable SMP. --BenH.
12 *
13 * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14 * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15 *
16 * Support for DayStar quad CPU cards
17 * Copyright (C) XLR8, Inc. 1994-2000
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
23 */
24
25#undef DEBUG
26
27#include <linux/config.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/smp.h>
31#include <linux/smp_lock.h>
32#include <linux/interrupt.h>
33#include <linux/kernel_stat.h>
34#include <linux/init.h>
35#include <linux/spinlock.h>
36#include <linux/errno.h>
37#include <linux/irq.h>
38
39#include <asm/ptrace.h>
40#include <asm/atomic.h>
41#include <asm/irq.h>
42#include <asm/page.h>
43#include <asm/pgtable.h>
44#include <asm/sections.h>
45#include <asm/io.h>
46#include <asm/prom.h>
47#include <asm/smp.h>
48#include <asm/machdep.h>
49#include <asm/pmac_feature.h>
50#include <asm/time.h>
51#include <asm/cacheflush.h>
52#include <asm/keylargo.h>
53#include <asm/pmac_low_i2c.h>
54
55#include "mpic.h"
56
57#ifdef DEBUG
58#define DBG(fmt...) udbg_printf(fmt)
59#else
60#define DBG(fmt...)
61#endif
62
63extern void pmac_secondary_start_1(void);
64extern void pmac_secondary_start_2(void);
65extern void pmac_secondary_start_3(void);
66
67extern struct smp_ops_t *smp_ops;
68
69static void (*pmac_tb_freeze)(int freeze);
70static struct device_node *pmac_tb_clock_chip_host;
71static u8 pmac_tb_pulsar_addr;
72static DEFINE_SPINLOCK(timebase_lock);
73static unsigned long timebase;
74
75static void smp_core99_cypress_tb_freeze(int freeze)
76{
77 u8 data;
78 int rc;
79
80 /* Strangely, the device-tree says address is 0xd2, but darwin
81 * accesses 0xd0 ...
82 */
83 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
84 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
85 0xd0 | pmac_low_i2c_read,
86 0x81, &data, 1);
87 if (rc != 0)
88 goto bail;
89
90 data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
91
92 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
93 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
94 0xd0 | pmac_low_i2c_write,
95 0x81, &data, 1);
96
97 bail:
98 if (rc != 0) {
99 printk("Cypress Timebase %s rc: %d\n",
100 freeze ? "freeze" : "unfreeze", rc);
101 panic("Timebase freeze failed !\n");
102 }
103}
104
105static void smp_core99_pulsar_tb_freeze(int freeze)
106{
107 u8 data;
108 int rc;
109
110 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
111 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
112 pmac_tb_pulsar_addr | pmac_low_i2c_read,
113 0x2e, &data, 1);
114 if (rc != 0)
115 goto bail;
116
117 data = (data & 0x88) | (freeze ? 0x11 : 0x22);
118
119 pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
120 rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
121 pmac_tb_pulsar_addr | pmac_low_i2c_write,
122 0x2e, &data, 1);
123 bail:
124 if (rc != 0) {
125 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
126 freeze ? "freeze" : "unfreeze", rc);
127 panic("Timebase freeze failed !\n");
128 }
129}
130
131
132static void smp_core99_give_timebase(void)
133{
134 /* Open i2c bus for synchronous access */
135 if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
136 panic("Can't open i2c for TB sync !\n");
137
138 spin_lock(&timebase_lock);
139 (*pmac_tb_freeze)(1);
140 mb();
141 timebase = get_tb();
142 spin_unlock(&timebase_lock);
143
144 while (timebase)
145 barrier();
146
147 spin_lock(&timebase_lock);
148 (*pmac_tb_freeze)(0);
149 spin_unlock(&timebase_lock);
150
151 /* Close i2c bus */
152 pmac_low_i2c_close(pmac_tb_clock_chip_host);
153}
154
155
156static void __devinit smp_core99_take_timebase(void)
157{
158 while (!timebase)
159 barrier();
160 spin_lock(&timebase_lock);
161 set_tb(timebase >> 32, timebase & 0xffffffff);
162 timebase = 0;
163 spin_unlock(&timebase_lock);
164}
165
166
167static int __init smp_core99_probe(void)
168{
169 struct device_node *cpus;
170 struct device_node *cc;
171 int ncpus = 0;
172
173 /* Maybe use systemconfiguration here ? */
174 if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
175
176 /* Count CPUs in the device-tree */
177 for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
178 ++ncpus;
179
180 printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
181
182 /* Nothing more to do if less than 2 of them */
183 if (ncpus <= 1)
184 return 1;
185
186 /* HW sync only on these platforms */
187 if (!machine_is_compatible("PowerMac7,2") &&
188 !machine_is_compatible("PowerMac7,3") &&
189 !machine_is_compatible("RackMac3,1"))
190 goto nohwsync;
191
192 /* Look for the clock chip */
193 for (cc = NULL; (cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL;) {
194 struct device_node *p = of_get_parent(cc);
195 u32 *reg;
196 int ok;
197 ok = p && device_is_compatible(p, "uni-n-i2c");
198 if (!ok)
199 goto next;
200 reg = (u32 *)get_property(cc, "reg", NULL);
201 if (reg == NULL)
202 goto next;
203 switch (*reg) {
204 case 0xd2:
205 if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
206 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
207 pmac_tb_pulsar_addr = 0xd2;
208 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
209 } else if (device_is_compatible(cc, "cy28508")) {
210 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
211 printk(KERN_INFO "Timebase clock is Cypress chip\n");
212 }
213 break;
214 case 0xd4:
215 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
216 pmac_tb_pulsar_addr = 0xd4;
217 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
218 break;
219 }
220 if (pmac_tb_freeze != NULL) {
221 pmac_tb_clock_chip_host = p;
222 smp_ops->give_timebase = smp_core99_give_timebase;
223 smp_ops->take_timebase = smp_core99_take_timebase;
224 of_node_put(cc);
225 of_node_put(p);
226 break;
227 }
228 next:
229 of_node_put(p);
230 }
231
232 nohwsync:
233 mpic_request_ipis();
234
235 return ncpus;
236}
237
238static void __init smp_core99_kick_cpu(int nr)
239{
240 int save_vector, j;
241 unsigned long new_vector;
242 unsigned long flags;
243 volatile unsigned int *vector
244 = ((volatile unsigned int *)(KERNELBASE+0x100));
245
246 if (nr < 1 || nr > 3)
247 return;
248 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
249
250 local_irq_save(flags);
251 local_irq_disable();
252
253 /* Save reset vector */
254 save_vector = *vector;
255
256 /* Setup fake reset vector that does
257 * b .pmac_secondary_start - KERNELBASE
258 */
259 switch(nr) {
260 case 1:
261 new_vector = (unsigned long)pmac_secondary_start_1;
262 break;
263 case 2:
264 new_vector = (unsigned long)pmac_secondary_start_2;
265 break;
266 case 3:
267 default:
268 new_vector = (unsigned long)pmac_secondary_start_3;
269 break;
270 }
271 *vector = 0x48000002 + (new_vector - KERNELBASE);
272
273 /* flush data cache and inval instruction cache */
274 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
275
276 /* Put some life in our friend */
277 pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
278 paca[nr].cpu_start = 1;
279
280 /* FIXME: We wait a bit for the CPU to take the exception, I should
281 * instead wait for the entry code to set something for me. Well,
282 * ideally, all that crap will be done in prom.c and the CPU left
283 * in a RAM-based wait loop like CHRP.
284 */
285 for (j = 1; j < 1000000; j++)
286 mb();
287
288 /* Restore our exception vector */
289 *vector = save_vector;
290 flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
291
292 local_irq_restore(flags);
293 if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
294}
295
296static void __init smp_core99_setup_cpu(int cpu_nr)
297{
298 /* Setup MPIC */
299 mpic_setup_this_cpu();
300
301 if (cpu_nr == 0) {
302 extern void g5_phy_disable_cpu1(void);
303
304 /* If we didn't start the second CPU, we must take
305 * it off the bus
306 */
307 if (num_online_cpus() < 2)
308 g5_phy_disable_cpu1();
309 if (ppc_md.progress) ppc_md.progress("smp_core99_setup_cpu 0 done", 0x349);
310 }
311}
312
313struct smp_ops_t core99_smp_ops __pmacdata = {
314 .message_pass = smp_mpic_message_pass,
315 .probe = smp_core99_probe,
316 .kick_cpu = smp_core99_kick_cpu,
317 .setup_cpu = smp_core99_setup_cpu,
318 .give_timebase = smp_generic_give_timebase,
319 .take_timebase = smp_generic_take_timebase,
320};
321
322void __init pmac_setup_smp(void)
323{
324 smp_ops = &core99_smp_ops;
325#ifdef CONFIG_HOTPLUG_CPU
326 smp_ops->cpu_enable = generic_cpu_enable;
327 smp_ops->cpu_disable = generic_cpu_disable;
328 smp_ops->cpu_die = generic_cpu_die;
329#endif
330}
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c
deleted file mode 100644
index 41bbb8c59697..000000000000
--- a/arch/ppc64/kernel/pmac_time.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * Support for periodic interrupts (100 per second) and for getting
3 * the current time from the RTC on Power Macintoshes.
4 *
5 * We use the decrementer register for our periodic interrupts.
6 *
7 * Paul Mackerras August 1996.
8 * Copyright (C) 1996 Paul Mackerras.
9 * Copyright (C) 2003-2005 Benjamin Herrenschmidt.
10 *
11 */
12#include <linux/config.h>
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/param.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <linux/init.h>
20#include <linux/time.h>
21#include <linux/adb.h>
22#include <linux/pmu.h>
23#include <linux/interrupt.h>
24
25#include <asm/sections.h>
26#include <asm/prom.h>
27#include <asm/system.h>
28#include <asm/io.h>
29#include <asm/pgtable.h>
30#include <asm/machdep.h>
31#include <asm/time.h>
32#include <asm/nvram.h>
33#include <asm/smu.h>
34
35#undef DEBUG
36
37#ifdef DEBUG
38#define DBG(x...) printk(x)
39#else
40#define DBG(x...)
41#endif
42
43/* Apparently the RTC stores seconds since 1 Jan 1904 */
44#define RTC_OFFSET 2082844800
45
46/*
47 * Calibrate the decrementer frequency with the VIA timer 1.
48 */
49#define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */
50
51extern struct timezone sys_tz;
52extern void to_tm(int tim, struct rtc_time * tm);
53
54void __pmac pmac_get_rtc_time(struct rtc_time *tm)
55{
56 switch(sys_ctrler) {
57#ifdef CONFIG_ADB_PMU
58 case SYS_CTRLER_PMU: {
59 /* TODO: Move that to a function in the PMU driver */
60 struct adb_request req;
61 unsigned int now;
62
63 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
64 return;
65 pmu_wait_complete(&req);
66 if (req.reply_len != 4)
67 printk(KERN_ERR "pmac_get_rtc_time: PMU returned a %d"
68 " bytes reply\n", req.reply_len);
69 now = (req.reply[0] << 24) + (req.reply[1] << 16)
70 + (req.reply[2] << 8) + req.reply[3];
71 DBG("get: %u -> %u\n", (int)now, (int)(now - RTC_OFFSET));
72 now -= RTC_OFFSET;
73
74 to_tm(now, tm);
75 tm->tm_year -= 1900;
76 tm->tm_mon -= 1;
77
78 DBG("-> tm_mday: %d, tm_mon: %d, tm_year: %d, %d:%02d:%02d\n",
79 tm->tm_mday, tm->tm_mon, tm->tm_year,
80 tm->tm_hour, tm->tm_min, tm->tm_sec);
81 break;
82 }
83#endif /* CONFIG_ADB_PMU */
84
85#ifdef CONFIG_PMAC_SMU
86 case SYS_CTRLER_SMU:
87 smu_get_rtc_time(tm, 1);
88 break;
89#endif /* CONFIG_PMAC_SMU */
90 default:
91 ;
92 }
93}
94
95int __pmac pmac_set_rtc_time(struct rtc_time *tm)
96{
97 switch(sys_ctrler) {
98#ifdef CONFIG_ADB_PMU
99 case SYS_CTRLER_PMU: {
100 /* TODO: Move that to a function in the PMU driver */
101 struct adb_request req;
102 unsigned int nowtime;
103
104 DBG("set: tm_mday: %d, tm_mon: %d, tm_year: %d,"
105 " %d:%02d:%02d\n",
106 tm->tm_mday, tm->tm_mon, tm->tm_year,
107 tm->tm_hour, tm->tm_min, tm->tm_sec);
108
109 nowtime = mktime(tm->tm_year + 1900, tm->tm_mon + 1,
110 tm->tm_mday, tm->tm_hour, tm->tm_min,
111 tm->tm_sec);
112
113 DBG("-> %u -> %u\n", (int)nowtime,
114 (int)(nowtime + RTC_OFFSET));
115 nowtime += RTC_OFFSET;
116
117 if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
118 nowtime >> 24, nowtime >> 16,
119 nowtime >> 8, nowtime) < 0)
120 return -ENXIO;
121 pmu_wait_complete(&req);
122 if (req.reply_len != 0)
123 printk(KERN_ERR "pmac_set_rtc_time: PMU returned a %d"
124 " bytes reply\n", req.reply_len);
125 return 0;
126 }
127#endif /* CONFIG_ADB_PMU */
128
129#ifdef CONFIG_PMAC_SMU
130 case SYS_CTRLER_SMU:
131 return smu_set_rtc_time(tm, 1);
132#endif /* CONFIG_PMAC_SMU */
133 default:
134 return -ENODEV;
135 }
136}
137
138void __init pmac_get_boot_time(struct rtc_time *tm)
139{
140 pmac_get_rtc_time(tm);
141
142#ifdef disabled__CONFIG_NVRAM
143 s32 delta = 0;
144 int dst;
145
146 delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
147 delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
148 delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
149 if (delta & 0x00800000UL)
150 delta |= 0xFF000000UL;
151 dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
152 printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
153 dst ? "on" : "off");
154#endif
155}
156
157/*
158 * Query the OF and get the decr frequency.
159 * FIXME: merge this with generic_calibrate_decr
160 */
161void __init pmac_calibrate_decr(void)
162{
163 struct device_node *cpu;
164 unsigned int freq, *fp;
165 struct div_result divres;
166
167 /*
168 * The cpu node should have a timebase-frequency property
169 * to tell us the rate at which the decrementer counts.
170 */
171 cpu = find_type_devices("cpu");
172 if (cpu == 0)
173 panic("can't find cpu node in time_init");
174 fp = (unsigned int *) get_property(cpu, "timebase-frequency", NULL);
175 if (fp == 0)
176 panic("can't get cpu timebase frequency");
177 freq = *fp;
178 printk("time_init: decrementer frequency = %u.%.6u MHz\n",
179 freq/1000000, freq%1000000);
180 tb_ticks_per_jiffy = freq / HZ;
181 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
182 tb_ticks_per_usec = freq / 1000000;
183 tb_to_us = mulhwu_scale_factor(freq, 1000000);
184 div128_by_32( 1024*1024, 0, tb_ticks_per_sec, &divres );
185 tb_to_xs = divres.result_low;
186 ppc_tb_freq = freq;
187
188 fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL);
189 if (fp == 0)
190 panic("can't get cpu processor frequency");
191 ppc_proc_freq = *fp;
192
193 setup_default_decr();
194}
195
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f4eec6..84006e26342c 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
19#include <asm/hw_irq.h> 19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h> 20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/iSeries/HvCallSc.h>
23 22
24EXPORT_SYMBOL(strcpy); 23EXPORT_SYMBOL(strcpy);
25EXPORT_SYMBOL(strncpy); 24EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
46 45
47EXPORT_SYMBOL(reloc_offset); 46EXPORT_SYMBOL(reloc_offset);
48 47
49#ifdef CONFIG_PPC_ISERIES
50EXPORT_SYMBOL(HvCall0);
51EXPORT_SYMBOL(HvCall1);
52EXPORT_SYMBOL(HvCall2);
53EXPORT_SYMBOL(HvCall3);
54EXPORT_SYMBOL(HvCall4);
55EXPORT_SYMBOL(HvCall5);
56EXPORT_SYMBOL(HvCall6);
57EXPORT_SYMBOL(HvCall7);
58#endif
59
60EXPORT_SYMBOL(_insb); 48EXPORT_SYMBOL(_insb);
61EXPORT_SYMBOL(_outsb); 49EXPORT_SYMBOL(_outsb);
62EXPORT_SYMBOL(_insw); 50EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
77EXPORT_SYMBOL(__flush_icache_range); 65EXPORT_SYMBOL(__flush_icache_range);
78EXPORT_SYMBOL(flush_dcache_range); 66EXPORT_SYMBOL(flush_dcache_range);
79 67
80#ifdef CONFIG_SMP
81#ifdef CONFIG_PPC_ISERIES
82EXPORT_SYMBOL(local_get_flags);
83EXPORT_SYMBOL(local_irq_disable);
84EXPORT_SYMBOL(local_irq_restore);
85#endif
86#endif
87
88EXPORT_SYMBOL(memcpy); 68EXPORT_SYMBOL(memcpy);
89EXPORT_SYMBOL(memset); 69EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memmove); 70EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 7035deb6de92..97bfceb5353b 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -46,7 +46,6 @@
46#include <asm/pgtable.h> 46#include <asm/pgtable.h>
47#include <asm/pci.h> 47#include <asm/pci.h>
48#include <asm/iommu.h> 48#include <asm/iommu.h>
49#include <asm/bootinfo.h>
50#include <asm/ppcdebug.h> 49#include <asm/ppcdebug.h>
51#include <asm/btext.h> 50#include <asm/btext.h>
52#include <asm/sections.h> 51#include <asm/sections.h>
@@ -78,11 +77,14 @@ typedef int interpret_func(struct device_node *, unsigned long *,
78extern struct rtas_t rtas; 77extern struct rtas_t rtas;
79extern struct lmb lmb; 78extern struct lmb lmb;
80extern unsigned long klimit; 79extern unsigned long klimit;
80extern unsigned long memory_limit;
81 81
82static int __initdata dt_root_addr_cells; 82static int __initdata dt_root_addr_cells;
83static int __initdata dt_root_size_cells; 83static int __initdata dt_root_size_cells;
84static int __initdata iommu_is_off; 84static int __initdata iommu_is_off;
85int __initdata iommu_force_on; 85int __initdata iommu_force_on;
86unsigned long tce_alloc_start, tce_alloc_end;
87
86typedef u32 cell_t; 88typedef u32 cell_t;
87 89
88#if 0 90#if 0
@@ -1063,7 +1065,6 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
1063{ 1065{
1064 u32 *prop; 1066 u32 *prop;
1065 u64 *prop64; 1067 u64 *prop64;
1066 extern unsigned long memory_limit, tce_alloc_start, tce_alloc_end;
1067 1068
1068 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 1069 DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
1069 1070
@@ -1237,7 +1238,7 @@ void __init early_init_devtree(void *params)
1237 lmb_init(); 1238 lmb_init();
1238 scan_flat_dt(early_init_dt_scan_root, NULL); 1239 scan_flat_dt(early_init_dt_scan_root, NULL);
1239 scan_flat_dt(early_init_dt_scan_memory, NULL); 1240 scan_flat_dt(early_init_dt_scan_memory, NULL);
1240 lmb_enforce_memory_limit(); 1241 lmb_enforce_memory_limit(memory_limit);
1241 lmb_analyze(); 1242 lmb_analyze();
1242 systemcfg->physicalMemorySize = lmb_phys_mem_size(); 1243 systemcfg->physicalMemorySize = lmb_phys_mem_size();
1243 lmb_reserve(0, __pa(klimit)); 1244 lmb_reserve(0, __pa(klimit));
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index f252670874a4..69924ba4d7d9 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -44,7 +44,6 @@
44#include <asm/pgtable.h> 44#include <asm/pgtable.h>
45#include <asm/pci.h> 45#include <asm/pci.h>
46#include <asm/iommu.h> 46#include <asm/iommu.h>
47#include <asm/bootinfo.h>
48#include <asm/ppcdebug.h> 47#include <asm/ppcdebug.h>
49#include <asm/btext.h> 48#include <asm/btext.h>
50#include <asm/sections.h> 49#include <asm/sections.h>
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c
deleted file mode 100644
index b1c044ca5756..000000000000
--- a/arch/ppc64/kernel/ptrace.c
+++ /dev/null
@@ -1,363 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/ptrace.c
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 *
7 * Derived from "arch/m68k/kernel/ptrace.c"
8 * Copyright (C) 1994 by Hamish Macdonald
9 * Taken from linux/kernel/ptrace.c and modified for M680x0.
10 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
11 *
12 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
13 * and Paul Mackerras (paulus@linuxcare.com.au).
14 *
15 * This file is subject to the terms and conditions of the GNU General
16 * Public License. See the file README.legal in the main directory of
17 * this archive for more details.
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/smp_lock.h>
26#include <linux/errno.h>
27#include <linux/ptrace.h>
28#include <linux/user.h>
29#include <linux/security.h>
30#include <linux/audit.h>
31#include <linux/seccomp.h>
32#include <linux/signal.h>
33
34#include <asm/uaccess.h>
35#include <asm/page.h>
36#include <asm/pgtable.h>
37#include <asm/system.h>
38#include <asm/ptrace-common.h>
39
40/*
41 * does not yet catch signals sent when the child dies.
42 * in exit.c or in signal.c.
43 */
44
45/*
46 * Called by kernel/ptrace.c when detaching..
47 *
48 * Make sure single step bits etc are not set.
49 */
50void ptrace_disable(struct task_struct *child)
51{
52 /* make sure the single step bit is not set. */
53 clear_single_step(child);
54}
55
56int sys_ptrace(long request, long pid, long addr, long data)
57{
58 struct task_struct *child;
59 int ret = -EPERM;
60
61 lock_kernel();
62 if (request == PTRACE_TRACEME) {
63 /* are we already being traced? */
64 if (current->ptrace & PT_PTRACED)
65 goto out;
66 ret = security_ptrace(current->parent, current);
67 if (ret)
68 goto out;
69 /* set the ptrace bit in the process flags. */
70 current->ptrace |= PT_PTRACED;
71 ret = 0;
72 goto out;
73 }
74 ret = -ESRCH;
75 read_lock(&tasklist_lock);
76 child = find_task_by_pid(pid);
77 if (child)
78 get_task_struct(child);
79 read_unlock(&tasklist_lock);
80 if (!child)
81 goto out;
82
83 ret = -EPERM;
84 if (pid == 1) /* you may not mess with init */
85 goto out_tsk;
86
87 if (request == PTRACE_ATTACH) {
88 ret = ptrace_attach(child);
89 goto out_tsk;
90 }
91
92 ret = ptrace_check_attach(child, request == PTRACE_KILL);
93 if (ret < 0)
94 goto out_tsk;
95
96 switch (request) {
97 /* when I and D space are separate, these will need to be fixed. */
98 case PTRACE_PEEKTEXT: /* read word at location addr. */
99 case PTRACE_PEEKDATA: {
100 unsigned long tmp;
101 int copied;
102
103 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
104 ret = -EIO;
105 if (copied != sizeof(tmp))
106 break;
107 ret = put_user(tmp,(unsigned long __user *) data);
108 break;
109 }
110
111 /* read the word at location addr in the USER area. */
112 case PTRACE_PEEKUSR: {
113 unsigned long index;
114 unsigned long tmp;
115
116 ret = -EIO;
117 /* convert to index and check */
118 index = (unsigned long) addr >> 3;
119 if ((addr & 7) || (index > PT_FPSCR))
120 break;
121
122 if (index < PT_FPR0) {
123 tmp = get_reg(child, (int)index);
124 } else {
125 flush_fp_to_thread(child);
126 tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
127 }
128 ret = put_user(tmp,(unsigned long __user *) data);
129 break;
130 }
131
132 /* If I and D space are separate, this will have to be fixed. */
133 case PTRACE_POKETEXT: /* write the word at location addr. */
134 case PTRACE_POKEDATA:
135 ret = 0;
136 if (access_process_vm(child, addr, &data, sizeof(data), 1)
137 == sizeof(data))
138 break;
139 ret = -EIO;
140 break;
141
142 /* write the word at location addr in the USER area */
143 case PTRACE_POKEUSR: {
144 unsigned long index;
145
146 ret = -EIO;
147 /* convert to index and check */
148 index = (unsigned long) addr >> 3;
149 if ((addr & 7) || (index > PT_FPSCR))
150 break;
151
152 if (index == PT_ORIG_R3)
153 break;
154 if (index < PT_FPR0) {
155 ret = put_reg(child, index, data);
156 } else {
157 flush_fp_to_thread(child);
158 ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
159 ret = 0;
160 }
161 break;
162 }
163
164 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
165 case PTRACE_CONT: { /* restart after signal. */
166 ret = -EIO;
167 if (!valid_signal(data))
168 break;
169 if (request == PTRACE_SYSCALL)
170 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
171 else
172 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
173 child->exit_code = data;
174 /* make sure the single step bit is not set. */
175 clear_single_step(child);
176 wake_up_process(child);
177 ret = 0;
178 break;
179 }
180
181 /*
182 * make the child exit. Best I can do is send it a sigkill.
183 * perhaps it should be put in the status that it wants to
184 * exit.
185 */
186 case PTRACE_KILL: {
187 ret = 0;
188 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
189 break;
190 child->exit_code = SIGKILL;
191 /* make sure the single step bit is not set. */
192 clear_single_step(child);
193 wake_up_process(child);
194 break;
195 }
196
197 case PTRACE_SINGLESTEP: { /* set the trap flag. */
198 ret = -EIO;
199 if (!valid_signal(data))
200 break;
201 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
202 set_single_step(child);
203 child->exit_code = data;
204 /* give it a chance to run. */
205 wake_up_process(child);
206 ret = 0;
207 break;
208 }
209
210 case PTRACE_GET_DEBUGREG: {
211 ret = -EINVAL;
212 /* We only support one DABR and no IABRS at the moment */
213 if (addr > 0)
214 break;
215 ret = put_user(child->thread.dabr,
216 (unsigned long __user *)data);
217 break;
218 }
219
220 case PTRACE_SET_DEBUGREG:
221 ret = ptrace_set_debugreg(child, addr, data);
222 break;
223
224 case PTRACE_DETACH:
225 ret = ptrace_detach(child, data);
226 break;
227
228 case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
229 int i;
230 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
231 unsigned long __user *tmp = (unsigned long __user *)addr;
232
233 for (i = 0; i < 32; i++) {
234 ret = put_user(*reg, tmp);
235 if (ret)
236 break;
237 reg++;
238 tmp++;
239 }
240 break;
241 }
242
243 case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
244 int i;
245 unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
246 unsigned long __user *tmp = (unsigned long __user *)addr;
247
248 for (i = 0; i < 32; i++) {
249 ret = get_user(*reg, tmp);
250 if (ret)
251 break;
252 reg++;
253 tmp++;
254 }
255 break;
256 }
257
258 case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
259 int i;
260 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
261 unsigned long __user *tmp = (unsigned long __user *)addr;
262
263 flush_fp_to_thread(child);
264
265 for (i = 0; i < 32; i++) {
266 ret = put_user(*reg, tmp);
267 if (ret)
268 break;
269 reg++;
270 tmp++;
271 }
272 break;
273 }
274
275 case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
276 int i;
277 unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
278 unsigned long __user *tmp = (unsigned long __user *)addr;
279
280 flush_fp_to_thread(child);
281
282 for (i = 0; i < 32; i++) {
283 ret = get_user(*reg, tmp);
284 if (ret)
285 break;
286 reg++;
287 tmp++;
288 }
289 break;
290 }
291
292#ifdef CONFIG_ALTIVEC
293 case PTRACE_GETVRREGS:
294 /* Get the child altivec register state. */
295 flush_altivec_to_thread(child);
296 ret = get_vrregs((unsigned long __user *)data, child);
297 break;
298
299 case PTRACE_SETVRREGS:
300 /* Set the child altivec register state. */
301 flush_altivec_to_thread(child);
302 ret = set_vrregs(child, (unsigned long __user *)data);
303 break;
304#endif
305
306 default:
307 ret = ptrace_request(child, request, addr, data);
308 break;
309 }
310out_tsk:
311 put_task_struct(child);
312out:
313 unlock_kernel();
314 return ret;
315}
316
317static void do_syscall_trace(void)
318{
319 /* the 0x80 provides a way for the tracing parent to distinguish
320 between a syscall stop and SIGTRAP delivery */
321 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
322 ? 0x80 : 0));
323
324 /*
325 * this isn't the same as continuing with a signal, but it will do
326 * for normal use. strace only continues with a signal if the
327 * stopping signal is not SIGTRAP. -brl
328 */
329 if (current->exit_code) {
330 send_sig(current->exit_code, current, 1);
331 current->exit_code = 0;
332 }
333}
334
335void do_syscall_trace_enter(struct pt_regs *regs)
336{
337 secure_computing(regs->gpr[0]);
338
339 if (test_thread_flag(TIF_SYSCALL_TRACE)
340 && (current->ptrace & PT_PTRACED))
341 do_syscall_trace();
342
343 if (unlikely(current->audit_context))
344 audit_syscall_entry(current,
345 test_thread_flag(TIF_32BIT)?AUDIT_ARCH_PPC:AUDIT_ARCH_PPC64,
346 regs->gpr[0],
347 regs->gpr[3], regs->gpr[4],
348 regs->gpr[5], regs->gpr[6]);
349
350}
351
352void do_syscall_trace_leave(struct pt_regs *regs)
353{
354 if (unlikely(current->audit_context))
355 audit_syscall_exit(current,
356 (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
357 regs->result);
358
359 if ((test_thread_flag(TIF_SYSCALL_TRACE)
360 || test_thread_flag(TIF_SINGLESTEP))
361 && (current->ptrace & PT_PTRACED))
362 do_syscall_trace();
363}
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c
index 1f3ff860fdf0..5bdd5b079d96 100644
--- a/arch/ppc64/kernel/rtas-proc.c
+++ b/arch/ppc64/kernel/rtas-proc.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/seq_file.h> 24#include <linux/seq_file.h>
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/rtc.h>
26 27
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28#include <asm/processor.h> 29#include <asm/processor.h>
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4a9719b48abe..3ad15c90fbbd 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -38,9 +38,8 @@
38#include <asm/pci-bridge.h> 38#include <asm/pci-bridge.h>
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/rtas.h> 40#include <asm/rtas.h>
41 41#include <asm/mpic.h>
42#include "mpic.h" 42#include <asm/ppc-pci.h>
43#include "pci.h"
44 43
45/* RTAS tokens */ 44/* RTAS tokens */
46static int read_pci_config; 45static int read_pci_config;
@@ -401,7 +400,7 @@ unsigned long __init find_and_init_phbs(void)
401 if (!phb) 400 if (!phb)
402 continue; 401 continue;
403 402
404 pci_process_bridge_OF_ranges(phb, node); 403 pci_process_bridge_OF_ranges(phb, node, 0);
405 pci_setup_phb_io(phb, index == 0); 404 pci_setup_phb_io(phb, index == 0);
406#ifdef CONFIG_PPC_PSERIES 405#ifdef CONFIG_PPC_PSERIES
407 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { 406 if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
@@ -451,7 +450,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
451 if (!phb) 450 if (!phb)
452 return NULL; 451 return NULL;
453 452
454 pci_process_bridge_OF_ranges(phb, dn); 453 pci_process_bridge_OF_ranges(phb, dn, primary);
455 454
456 pci_setup_phb_io_dynamic(phb, primary); 455 pci_setup_phb_io_dynamic(phb, primary);
457 of_node_put(root); 456 of_node_put(root);
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc61325..79e7ed2858dd 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/rtas.h> 44#include <asm/rtas.h>
45 45
46#include <asm/iSeries/mf.h>
47#include <asm/machdep.h> 46#include <asm/machdep.h>
48 47
49extern int piranha_simulator;
50
51/* 48/*
52 * We sponge a minor off of the misc major. No need slurping 49 * We sponge a minor off of the misc major. No need slurping
53 * up another valuable major dev number for this. If you add 50 * up another valuable major dev number for this. If you add
@@ -265,44 +262,10 @@ static int rtc_read_proc(char *page, char **start, off_t off,
265 return len; 262 return len;
266} 263}
267 264
268#ifdef CONFIG_PPC_ISERIES
269/*
270 * Get the RTC from the virtual service processor
271 * This requires flowing LpEvents to the primary partition
272 */
273void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
274{
275 if (piranha_simulator)
276 return;
277
278 mf_get_rtc(rtc_tm);
279 rtc_tm->tm_mon--;
280}
281
282/*
283 * Set the RTC in the virtual service processor
284 * This requires flowing LpEvents to the primary partition
285 */
286int iSeries_set_rtc_time(struct rtc_time *tm)
287{
288 mf_set_rtc(tm);
289 return 0;
290}
291
292void iSeries_get_boot_time(struct rtc_time *tm)
293{
294 if ( piranha_simulator )
295 return;
296
297 mf_get_boot_rtc(tm);
298 tm->tm_mon -= 1;
299}
300#endif
301
302#ifdef CONFIG_PPC_RTAS 265#ifdef CONFIG_PPC_RTAS
303#define MAX_RTC_WAIT 5000 /* 5 sec */ 266#define MAX_RTC_WAIT 5000 /* 5 sec */
304#define RTAS_CLOCK_BUSY (-2) 267#define RTAS_CLOCK_BUSY (-2)
305void rtas_get_boot_time(struct rtc_time *rtc_tm) 268unsigned long rtas_get_boot_time(void)
306{ 269{
307 int ret[8]; 270 int ret[8];
308 int error, wait_time; 271 int error, wait_time;
@@ -322,15 +285,10 @@ void rtas_get_boot_time(struct rtc_time *rtc_tm)
322 if (error != 0 && printk_ratelimit()) { 285 if (error != 0 && printk_ratelimit()) {
323 printk(KERN_WARNING "error: reading the clock failed (%d)\n", 286 printk(KERN_WARNING "error: reading the clock failed (%d)\n",
324 error); 287 error);
325 return; 288 return 0;
326 } 289 }
327 290
328 rtc_tm->tm_sec = ret[5]; 291 return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
329 rtc_tm->tm_min = ret[4];
330 rtc_tm->tm_hour = ret[3];
331 rtc_tm->tm_mday = ret[2];
332 rtc_tm->tm_mon = ret[1] - 1;
333 rtc_tm->tm_year = ret[0] - 1900;
334} 292}
335 293
336/* NOTE: get_rtc_time will get an error if executed in interrupt context 294/* NOTE: get_rtc_time will get an error if executed in interrupt context
diff --git a/arch/ppc64/kernel/signal.c b/arch/ppc64/kernel/signal.c
index 347112cca3c0..ec9d0984b6a0 100644
--- a/arch/ppc64/kernel/signal.c
+++ b/arch/ppc64/kernel/signal.c
@@ -133,7 +133,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
133 flush_fp_to_thread(current); 133 flush_fp_to_thread(current);
134 134
135 /* Make sure signal doesn't get spurrious FP exceptions */ 135 /* Make sure signal doesn't get spurrious FP exceptions */
136 current->thread.fpscr = 0; 136 current->thread.fpscr.val = 0;
137 137
138#ifdef CONFIG_ALTIVEC 138#ifdef CONFIG_ALTIVEC
139 err |= __put_user(v_regs, &sc->v_regs); 139 err |= __put_user(v_regs, &sc->v_regs);
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 793b562da653..017c12919832 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -45,8 +45,7 @@
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/system.h> 46#include <asm/system.h>
47#include <asm/abs_addr.h> 47#include <asm/abs_addr.h>
48 48#include <asm/mpic.h>
49#include "mpic.h"
50 49
51#ifdef DEBUG 50#ifdef DEBUG
52#define DBG(fmt...) udbg_printf(fmt) 51#define DBG(fmt...) udbg_printf(fmt)
@@ -70,28 +69,6 @@ void smp_call_function_interrupt(void);
70int smt_enabled_at_boot = 1; 69int smt_enabled_at_boot = 1;
71 70
72#ifdef CONFIG_MPIC 71#ifdef CONFIG_MPIC
73void smp_mpic_message_pass(int target, int msg)
74{
75 /* make sure we're sending something that translates to an IPI */
76 if ( msg > 0x3 ){
77 printk("SMP %d: smp_message_pass: unknown msg %d\n",
78 smp_processor_id(), msg);
79 return;
80 }
81 switch ( target )
82 {
83 case MSG_ALL:
84 mpic_send_ipi(msg, 0xffffffff);
85 break;
86 case MSG_ALL_BUT_SELF:
87 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
88 break;
89 default:
90 mpic_send_ipi(msg, 1 << target);
91 break;
92 }
93}
94
95int __init smp_mpic_probe(void) 72int __init smp_mpic_probe(void)
96{ 73{
97 int nr_cpus; 74 int nr_cpus;
@@ -128,21 +105,6 @@ void __devinit smp_generic_kick_cpu(int nr)
128 105
129#endif /* CONFIG_MPIC */ 106#endif /* CONFIG_MPIC */
130 107
131static void __init smp_space_timers(unsigned int max_cpus)
132{
133 int i;
134 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
135 unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
136
137 for_each_cpu(i) {
138 if (i != boot_cpuid) {
139 paca[i].next_jiffy_update_tb =
140 previous_tb + offset;
141 previous_tb = paca[i].next_jiffy_update_tb;
142 }
143 }
144}
145
146void smp_message_recv(int msg, struct pt_regs *regs) 108void smp_message_recv(int msg, struct pt_regs *regs)
147{ 109{
148 switch(msg) { 110 switch(msg) {
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
deleted file mode 100644
index 7467ae508e6e..000000000000
--- a/arch/ppc64/kernel/traps.c
+++ /dev/null
@@ -1,568 +0,0 @@
1/*
2 * linux/arch/ppc64/kernel/traps.c
3 *
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Modified by Cort Dougan (cort@cs.nmt.edu)
12 * and Paul Mackerras (paulus@cs.anu.edu.au)
13 */
14
15/*
16 * This file handles the architecture-dependent parts of hardware exceptions
17 */
18
19#include <linux/config.h>
20#include <linux/errno.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/stddef.h>
25#include <linux/unistd.h>
26#include <linux/slab.h>
27#include <linux/user.h>
28#include <linux/a.out.h>
29#include <linux/interrupt.h>
30#include <linux/init.h>
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/kprobes.h>
34#include <asm/kdebug.h>
35
36#include <asm/pgtable.h>
37#include <asm/uaccess.h>
38#include <asm/system.h>
39#include <asm/io.h>
40#include <asm/processor.h>
41#include <asm/ppcdebug.h>
42#include <asm/rtas.h>
43#include <asm/systemcfg.h>
44#include <asm/machdep.h>
45#include <asm/pmc.h>
46
47#ifdef CONFIG_DEBUGGER
48int (*__debugger)(struct pt_regs *regs);
49int (*__debugger_ipi)(struct pt_regs *regs);
50int (*__debugger_bpt)(struct pt_regs *regs);
51int (*__debugger_sstep)(struct pt_regs *regs);
52int (*__debugger_iabr_match)(struct pt_regs *regs);
53int (*__debugger_dabr_match)(struct pt_regs *regs);
54int (*__debugger_fault_handler)(struct pt_regs *regs);
55
56EXPORT_SYMBOL(__debugger);
57EXPORT_SYMBOL(__debugger_ipi);
58EXPORT_SYMBOL(__debugger_bpt);
59EXPORT_SYMBOL(__debugger_sstep);
60EXPORT_SYMBOL(__debugger_iabr_match);
61EXPORT_SYMBOL(__debugger_dabr_match);
62EXPORT_SYMBOL(__debugger_fault_handler);
63#endif
64
65struct notifier_block *ppc64_die_chain;
66static DEFINE_SPINLOCK(die_notifier_lock);
67
68int register_die_notifier(struct notifier_block *nb)
69{
70 int err = 0;
71 unsigned long flags;
72
73 spin_lock_irqsave(&die_notifier_lock, flags);
74 err = notifier_chain_register(&ppc64_die_chain, nb);
75 spin_unlock_irqrestore(&die_notifier_lock, flags);
76 return err;
77}
78
79/*
80 * Trap & Exception support
81 */
82
83static DEFINE_SPINLOCK(die_lock);
84
85int die(const char *str, struct pt_regs *regs, long err)
86{
87 static int die_counter;
88 int nl = 0;
89
90 if (debugger(regs))
91 return 1;
92
93 console_verbose();
94 spin_lock_irq(&die_lock);
95 bust_spinlocks(1);
96 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
97#ifdef CONFIG_PREEMPT
98 printk("PREEMPT ");
99 nl = 1;
100#endif
101#ifdef CONFIG_SMP
102 printk("SMP NR_CPUS=%d ", NR_CPUS);
103 nl = 1;
104#endif
105#ifdef CONFIG_DEBUG_PAGEALLOC
106 printk("DEBUG_PAGEALLOC ");
107 nl = 1;
108#endif
109#ifdef CONFIG_NUMA
110 printk("NUMA ");
111 nl = 1;
112#endif
113 switch(systemcfg->platform) {
114 case PLATFORM_PSERIES:
115 printk("PSERIES ");
116 nl = 1;
117 break;
118 case PLATFORM_PSERIES_LPAR:
119 printk("PSERIES LPAR ");
120 nl = 1;
121 break;
122 case PLATFORM_ISERIES_LPAR:
123 printk("ISERIES LPAR ");
124 nl = 1;
125 break;
126 case PLATFORM_POWERMAC:
127 printk("POWERMAC ");
128 nl = 1;
129 break;
130 case PLATFORM_BPA:
131 printk("BPA ");
132 nl = 1;
133 break;
134 }
135 if (nl)
136 printk("\n");
137 print_modules();
138 show_regs(regs);
139 bust_spinlocks(0);
140 spin_unlock_irq(&die_lock);
141
142 if (in_interrupt())
143 panic("Fatal exception in interrupt");
144
145 if (panic_on_oops) {
146 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
147 ssleep(5);
148 panic("Fatal exception");
149 }
150 do_exit(SIGSEGV);
151
152 return 0;
153}
154
155void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
156{
157 siginfo_t info;
158
159 if (!user_mode(regs)) {
160 if (die("Exception in kernel mode", regs, signr))
161 return;
162 }
163
164 memset(&info, 0, sizeof(info));
165 info.si_signo = signr;
166 info.si_code = code;
167 info.si_addr = (void __user *) addr;
168 force_sig_info(signr, &info, current);
169}
170
171void system_reset_exception(struct pt_regs *regs)
172{
173 /* See if any machine dependent calls */
174 if (ppc_md.system_reset_exception)
175 ppc_md.system_reset_exception(regs);
176
177 die("System Reset", regs, 0);
178
179 /* Must die if the interrupt is not recoverable */
180 if (!(regs->msr & MSR_RI))
181 panic("Unrecoverable System Reset");
182
183 /* What should we do here? We could issue a shutdown or hard reset. */
184}
185
186void machine_check_exception(struct pt_regs *regs)
187{
188 int recover = 0;
189
190 /* See if any machine dependent calls */
191 if (ppc_md.machine_check_exception)
192 recover = ppc_md.machine_check_exception(regs);
193
194 if (recover)
195 return;
196
197 if (debugger_fault_handler(regs))
198 return;
199 die("Machine check", regs, 0);
200
201 /* Must die if the interrupt is not recoverable */
202 if (!(regs->msr & MSR_RI))
203 panic("Unrecoverable Machine check");
204}
205
206void unknown_exception(struct pt_regs *regs)
207{
208 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
209 regs->nip, regs->msr, regs->trap);
210
211 _exception(SIGTRAP, regs, 0, 0);
212}
213
214void instruction_breakpoint_exception(struct pt_regs *regs)
215{
216 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
217 5, SIGTRAP) == NOTIFY_STOP)
218 return;
219 if (debugger_iabr_match(regs))
220 return;
221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
222}
223
224void __kprobes single_step_exception(struct pt_regs *regs)
225{
226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
227
228 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
229 5, SIGTRAP) == NOTIFY_STOP)
230 return;
231 if (debugger_sstep(regs))
232 return;
233
234 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
235}
236
237/*
238 * After we have successfully emulated an instruction, we have to
239 * check if the instruction was being single-stepped, and if so,
240 * pretend we got a single-step exception. This was pointed out
241 * by Kumar Gala. -- paulus
242 */
243static inline void emulate_single_step(struct pt_regs *regs)
244{
245 if (regs->msr & MSR_SE)
246 single_step_exception(regs);
247}
248
249static void parse_fpe(struct pt_regs *regs)
250{
251 int code = 0;
252 unsigned long fpscr;
253
254 flush_fp_to_thread(current);
255
256 fpscr = current->thread.fpscr;
257
258 /* Invalid operation */
259 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
260 code = FPE_FLTINV;
261
262 /* Overflow */
263 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
264 code = FPE_FLTOVF;
265
266 /* Underflow */
267 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
268 code = FPE_FLTUND;
269
270 /* Divide by zero */
271 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
272 code = FPE_FLTDIV;
273
274 /* Inexact result */
275 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
276 code = FPE_FLTRES;
277
278 _exception(SIGFPE, regs, code, regs->nip);
279}
280
281/*
282 * Illegal instruction emulation support. Return non-zero if we can't
283 * emulate, or -EFAULT if the associated memory access caused an access
284 * fault. Return zero on success.
285 */
286
287#define INST_MFSPR_PVR 0x7c1f42a6
288#define INST_MFSPR_PVR_MASK 0xfc1fffff
289
290#define INST_DCBA 0x7c0005ec
291#define INST_DCBA_MASK 0x7c0007fe
292
293#define INST_MCRXR 0x7c000400
294#define INST_MCRXR_MASK 0x7c0007fe
295
296static int emulate_instruction(struct pt_regs *regs)
297{
298 unsigned int instword;
299
300 if (!user_mode(regs))
301 return -EINVAL;
302
303 CHECK_FULL_REGS(regs);
304
305 if (get_user(instword, (unsigned int __user *)(regs->nip)))
306 return -EFAULT;
307
308 /* Emulate the mfspr rD, PVR. */
309 if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
310 unsigned int rd;
311
312 rd = (instword >> 21) & 0x1f;
313 regs->gpr[rd] = mfspr(SPRN_PVR);
314 return 0;
315 }
316
317 /* Emulating the dcba insn is just a no-op. */
318 if ((instword & INST_DCBA_MASK) == INST_DCBA) {
319 static int warned;
320
321 if (!warned) {
322 printk(KERN_WARNING
323 "process %d (%s) uses obsolete 'dcba' insn\n",
324 current->pid, current->comm);
325 warned = 1;
326 }
327 return 0;
328 }
329
330 /* Emulate the mcrxr insn. */
331 if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
332 static int warned;
333 unsigned int shift;
334
335 if (!warned) {
336 printk(KERN_WARNING
337 "process %d (%s) uses obsolete 'mcrxr' insn\n",
338 current->pid, current->comm);
339 warned = 1;
340 }
341
342 shift = (instword >> 21) & 0x1c;
343 regs->ccr &= ~(0xf0000000 >> shift);
344 regs->ccr |= (regs->xer & 0xf0000000) >> shift;
345 regs->xer &= ~0xf0000000;
346 return 0;
347 }
348
349 return -EINVAL;
350}
351
352/*
353 * Look through the list of trap instructions that are used for BUG(),
354 * BUG_ON() and WARN_ON() and see if we hit one. At this point we know
355 * that the exception was caused by a trap instruction of some kind.
356 * Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
357 * otherwise.
358 */
359extern struct bug_entry __start___bug_table[], __stop___bug_table[];
360
361#ifndef CONFIG_MODULES
362#define module_find_bug(x) NULL
363#endif
364
365struct bug_entry *find_bug(unsigned long bugaddr)
366{
367 struct bug_entry *bug;
368
369 for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
370 if (bugaddr == bug->bug_addr)
371 return bug;
372 return module_find_bug(bugaddr);
373}
374
375static int
376check_bug_trap(struct pt_regs *regs)
377{
378 struct bug_entry *bug;
379 unsigned long addr;
380
381 if (regs->msr & MSR_PR)
382 return 0; /* not in kernel */
383 addr = regs->nip; /* address of trap instruction */
384 if (addr < PAGE_OFFSET)
385 return 0;
386 bug = find_bug(regs->nip);
387 if (bug == NULL)
388 return 0;
389 if (bug->line & BUG_WARNING_TRAP) {
390 /* this is a WARN_ON rather than BUG/BUG_ON */
391 printk(KERN_ERR "Badness in %s at %s:%d\n",
392 bug->function, bug->file,
393 (unsigned int)bug->line & ~BUG_WARNING_TRAP);
394 show_stack(current, (void *)regs->gpr[1]);
395 return 1;
396 }
397 printk(KERN_CRIT "kernel BUG in %s at %s:%d!\n",
398 bug->function, bug->file, (unsigned int)bug->line);
399 return 0;
400}
401
402void __kprobes program_check_exception(struct pt_regs *regs)
403{
404 if (debugger_fault_handler(regs))
405 return;
406
407 if (regs->msr & 0x100000) {
408 /* IEEE FP exception */
409 parse_fpe(regs);
410 } else if (regs->msr & 0x20000) {
411 /* trap exception */
412
413 if (notify_die(DIE_BPT, "breakpoint", regs, 5,
414 5, SIGTRAP) == NOTIFY_STOP)
415 return;
416 if (debugger_bpt(regs))
417 return;
418
419 if (check_bug_trap(regs)) {
420 regs->nip += 4;
421 return;
422 }
423 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
424
425 } else {
426 /* Privileged or illegal instruction; try to emulate it. */
427 switch (emulate_instruction(regs)) {
428 case 0:
429 regs->nip += 4;
430 emulate_single_step(regs);
431 break;
432
433 case -EFAULT:
434 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
435 break;
436
437 default:
438 if (regs->msr & 0x40000)
439 /* priveleged */
440 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
441 else
442 /* illegal */
443 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
444 break;
445 }
446 }
447}
448
449void kernel_fp_unavailable_exception(struct pt_regs *regs)
450{
451 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
452 "%lx at %lx\n", regs->trap, regs->nip);
453 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
454}
455
456void altivec_unavailable_exception(struct pt_regs *regs)
457{
458 if (user_mode(regs)) {
459 /* A user program has executed an altivec instruction,
460 but this kernel doesn't support altivec. */
461 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
462 return;
463 }
464 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
465 "%lx at %lx\n", regs->trap, regs->nip);
466 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
467}
468
469extern perf_irq_t perf_irq;
470
471void performance_monitor_exception(struct pt_regs *regs)
472{
473 perf_irq(regs);
474}
475
476void alignment_exception(struct pt_regs *regs)
477{
478 int fixed;
479
480 fixed = fix_alignment(regs);
481
482 if (fixed == 1) {
483 regs->nip += 4; /* skip over emulated instruction */
484 emulate_single_step(regs);
485 return;
486 }
487
488 /* Operand address was bad */
489 if (fixed == -EFAULT) {
490 if (user_mode(regs)) {
491 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->dar);
492 } else {
493 /* Search exception table */
494 bad_page_fault(regs, regs->dar, SIGSEGV);
495 }
496
497 return;
498 }
499
500 _exception(SIGBUS, regs, BUS_ADRALN, regs->nip);
501}
502
503#ifdef CONFIG_ALTIVEC
504void altivec_assist_exception(struct pt_regs *regs)
505{
506 int err;
507 siginfo_t info;
508
509 if (!user_mode(regs)) {
510 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
511 " at %lx\n", regs->nip);
512 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
513 }
514
515 flush_altivec_to_thread(current);
516
517 err = emulate_altivec(regs);
518 if (err == 0) {
519 regs->nip += 4; /* skip emulated instruction */
520 emulate_single_step(regs);
521 return;
522 }
523
524 if (err == -EFAULT) {
525 /* got an error reading the instruction */
526 info.si_signo = SIGSEGV;
527 info.si_errno = 0;
528 info.si_code = SEGV_MAPERR;
529 info.si_addr = (void __user *) regs->nip;
530 force_sig_info(SIGSEGV, &info, current);
531 } else {
532 /* didn't recognize the instruction */
533 /* XXX quick hack for now: set the non-Java bit in the VSCR */
534 if (printk_ratelimit())
535 printk(KERN_ERR "Unrecognized altivec instruction "
536 "in %s at %lx\n", current->comm, regs->nip);
537 current->thread.vscr.u[3] |= 0x10000;
538 }
539}
540#endif /* CONFIG_ALTIVEC */
541
542/*
543 * We enter here if we get an unrecoverable exception, that is, one
544 * that happened at a point where the RI (recoverable interrupt) bit
545 * in the MSR is 0. This indicates that SRR0/1 are live, and that
546 * we therefore lost state by taking this exception.
547 */
548void unrecoverable_exception(struct pt_regs *regs)
549{
550 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
551 regs->trap, regs->nip);
552 die("Unrecoverable exception", regs, SIGABRT);
553}
554
555/*
556 * We enter here if we discover during exception entry that we are
557 * running in supervisor mode with a userspace value in the stack pointer.
558 */
559void kernel_bad_stack(struct pt_regs *regs)
560{
561 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
562 regs->gpr[1], regs->nip);
563 die("Bad kernel stack pointer", regs, SIGABRT);
564}
565
566void __init trap_init(void)
567{
568}
diff --git a/arch/ppc64/kernel/vdso64/sigtramp.S b/arch/ppc64/kernel/vdso64/sigtramp.S
index 8ae8f205e470..31b604ab56de 100644
--- a/arch/ppc64/kernel/vdso64/sigtramp.S
+++ b/arch/ppc64/kernel/vdso64/sigtramp.S
@@ -15,6 +15,7 @@
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/unistd.h> 16#include <asm/unistd.h>
17#include <asm/vdso.h> 17#include <asm/vdso.h>
18#include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */
18 19
19 .text 20 .text
20 21
diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c
deleted file mode 100644
index cb207629f21f..000000000000
--- a/arch/ppc64/kernel/vecemu.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Routines to emulate some Altivec/VMX instructions, specifically
3 * those that can trap when given denormalized operands in Java mode.
4 */
5#include <linux/kernel.h>
6#include <linux/errno.h>
7#include <linux/sched.h>
8#include <asm/ptrace.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12/* Functions in vector.S */
13extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
14extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
15extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
16extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
17extern void vrefp(vector128 *dst, vector128 *src);
18extern void vrsqrtefp(vector128 *dst, vector128 *src);
19extern void vexptep(vector128 *dst, vector128 *src);
20
21static unsigned int exp2s[8] = {
22 0x800000,
23 0x8b95c2,
24 0x9837f0,
25 0xa5fed7,
26 0xb504f3,
27 0xc5672a,
28 0xd744fd,
29 0xeac0c7
30};
31
32/*
33 * Computes an estimate of 2^x. The `s' argument is the 32-bit
34 * single-precision floating-point representation of x.
35 */
36static unsigned int eexp2(unsigned int s)
37{
38 int exp, pwr;
39 unsigned int mant, frac;
40
41 /* extract exponent field from input */
42 exp = ((s >> 23) & 0xff) - 127;
43 if (exp > 7) {
44 /* check for NaN input */
45 if (exp == 128 && (s & 0x7fffff) != 0)
46 return s | 0x400000; /* return QNaN */
47 /* 2^-big = 0, 2^+big = +Inf */
48 return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
49 }
50 if (exp < -23)
51 return 0x3f800000; /* 1.0 */
52
53 /* convert to fixed point integer in 9.23 representation */
54 pwr = (s & 0x7fffff) | 0x800000;
55 if (exp > 0)
56 pwr <<= exp;
57 else
58 pwr >>= -exp;
59 if (s & 0x80000000)
60 pwr = -pwr;
61
62 /* extract integer part, which becomes exponent part of result */
63 exp = (pwr >> 23) + 126;
64 if (exp >= 254)
65 return 0x7f800000;
66 if (exp < -23)
67 return 0;
68
69 /* table lookup on top 3 bits of fraction to get mantissa */
70 mant = exp2s[(pwr >> 20) & 7];
71
72 /* linear interpolation using remaining 20 bits of fraction */
73 asm("mulhwu %0,%1,%2" : "=r" (frac)
74 : "r" (pwr << 12), "r" (0x172b83ff));
75 asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
76 mant += frac;
77
78 if (exp >= 0)
79 return mant + (exp << 23);
80
81 /* denormalized result */
82 exp = -exp;
83 mant += 1 << (exp - 1);
84 return mant >> exp;
85}
86
87/*
88 * Computes an estimate of log_2(x). The `s' argument is the 32-bit
89 * single-precision floating-point representation of x.
90 */
91static unsigned int elog2(unsigned int s)
92{
93 int exp, mant, lz, frac;
94
95 exp = s & 0x7f800000;
96 mant = s & 0x7fffff;
97 if (exp == 0x7f800000) { /* Inf or NaN */
98 if (mant != 0)
99 s |= 0x400000; /* turn NaN into QNaN */
100 return s;
101 }
102 if ((exp | mant) == 0) /* +0 or -0 */
103 return 0xff800000; /* return -Inf */
104
105 if (exp == 0) {
106 /* denormalized */
107 asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
108 mant <<= lz - 8;
109 exp = (-118 - lz) << 23;
110 } else {
111 mant |= 0x800000;
112 exp -= 127 << 23;
113 }
114
115 if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */
116 exp |= 0x400000; /* 0.5 * 2^23 */
117 asm("mulhwu %0,%1,%2" : "=r" (mant)
118 : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */
119 }
120 if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */
121 exp |= 0x200000; /* 0.25 * 2^23 */
122 asm("mulhwu %0,%1,%2" : "=r" (mant)
123 : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */
124 }
125 if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */
126 exp |= 0x100000; /* 0.125 * 2^23 */
127 asm("mulhwu %0,%1,%2" : "=r" (mant)
128 : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */
129 }
130 if (mant > 0x800000) { /* 1.0 * 2^23 */
131 /* calculate (mant - 1) * 1.381097463 */
132 /* 1.381097463 == 0.125 / (2^0.125 - 1) */
133 asm("mulhwu %0,%1,%2" : "=r" (frac)
134 : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
135 exp += frac;
136 }
137 s = exp & 0x80000000;
138 if (exp != 0) {
139 if (s)
140 exp = -exp;
141 asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
142 lz = 8 - lz;
143 if (lz > 0)
144 exp >>= lz;
145 else if (lz < 0)
146 exp <<= -lz;
147 s += ((lz + 126) << 23) + exp;
148 }
149 return s;
150}
151
152#define VSCR_SAT 1
153
154static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
155{
156 int exp, mant;
157
158 exp = (x >> 23) & 0xff;
159 mant = x & 0x7fffff;
160 if (exp == 255 && mant != 0)
161 return 0; /* NaN -> 0 */
162 exp = exp - 127 + scale;
163 if (exp < 0)
164 return 0; /* round towards zero */
165 if (exp >= 31) {
166 /* saturate, unless the result would be -2^31 */
167 if (x + (scale << 23) != 0xcf000000)
168 *vscrp |= VSCR_SAT;
169 return (x & 0x80000000)? 0x80000000: 0x7fffffff;
170 }
171 mant |= 0x800000;
172 mant = (mant << 7) >> (30 - exp);
173 return (x & 0x80000000)? -mant: mant;
174}
175
176static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
177{
178 int exp;
179 unsigned int mant;
180
181 exp = (x >> 23) & 0xff;
182 mant = x & 0x7fffff;
183 if (exp == 255 && mant != 0)
184 return 0; /* NaN -> 0 */
185 exp = exp - 127 + scale;
186 if (exp < 0)
187 return 0; /* round towards zero */
188 if (x & 0x80000000) {
189 /* negative => saturate to 0 */
190 *vscrp |= VSCR_SAT;
191 return 0;
192 }
193 if (exp >= 32) {
194 /* saturate */
195 *vscrp |= VSCR_SAT;
196 return 0xffffffff;
197 }
198 mant |= 0x800000;
199 mant = (mant << 8) >> (31 - exp);
200 return mant;
201}
202
203/* Round to floating integer, towards 0 */
204static unsigned int rfiz(unsigned int x)
205{
206 int exp;
207
208 exp = ((x >> 23) & 0xff) - 127;
209 if (exp == 128 && (x & 0x7fffff) != 0)
210 return x | 0x400000; /* NaN -> make it a QNaN */
211 if (exp >= 23)
212 return x; /* it's an integer already (or Inf) */
213 if (exp < 0)
214 return x & 0x80000000; /* |x| < 1.0 rounds to 0 */
215 return x & ~(0x7fffff >> exp);
216}
217
218/* Round to floating integer, towards +/- Inf */
219static unsigned int rfii(unsigned int x)
220{
221 int exp, mask;
222
223 exp = ((x >> 23) & 0xff) - 127;
224 if (exp == 128 && (x & 0x7fffff) != 0)
225 return x | 0x400000; /* NaN -> make it a QNaN */
226 if (exp >= 23)
227 return x; /* it's an integer already (or Inf) */
228 if ((x & 0x7fffffff) == 0)
229 return x; /* +/-0 -> +/-0 */
230 if (exp < 0)
231 /* 0 < |x| < 1.0 rounds to +/- 1.0 */
232 return (x & 0x80000000) | 0x3f800000;
233 mask = 0x7fffff >> exp;
234 /* mantissa overflows into exponent - that's OK,
235 it can't overflow into the sign bit */
236 return (x + mask) & ~mask;
237}
238
239/* Round to floating integer, to nearest */
240static unsigned int rfin(unsigned int x)
241{
242 int exp, half;
243
244 exp = ((x >> 23) & 0xff) - 127;
245 if (exp == 128 && (x & 0x7fffff) != 0)
246 return x | 0x400000; /* NaN -> make it a QNaN */
247 if (exp >= 23)
248 return x; /* it's an integer already (or Inf) */
249 if (exp < -1)
250 return x & 0x80000000; /* |x| < 0.5 -> +/-0 */
251 if (exp == -1)
252 /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
253 return (x & 0x80000000) | 0x3f800000;
254 half = 0x400000 >> exp;
255 /* add 0.5 to the magnitude and chop off the fraction bits */
256 return (x + half) & ~(0x7fffff >> exp);
257}
258
259int
260emulate_altivec(struct pt_regs *regs)
261{
262 unsigned int instr, i;
263 unsigned int va, vb, vc, vd;
264 vector128 *vrs;
265
266 if (get_user(instr, (unsigned int __user *) regs->nip))
267 return -EFAULT;
268 if ((instr >> 26) != 4)
269 return -EINVAL; /* not an altivec instruction */
270 vd = (instr >> 21) & 0x1f;
271 va = (instr >> 16) & 0x1f;
272 vb = (instr >> 11) & 0x1f;
273 vc = (instr >> 6) & 0x1f;
274
275 vrs = current->thread.vr;
276 switch (instr & 0x3f) {
277 case 10:
278 switch (vc) {
279 case 0: /* vaddfp */
280 vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
281 break;
282 case 1: /* vsubfp */
283 vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
284 break;
285 case 4: /* vrefp */
286 vrefp(&vrs[vd], &vrs[vb]);
287 break;
288 case 5: /* vrsqrtefp */
289 vrsqrtefp(&vrs[vd], &vrs[vb]);
290 break;
291 case 6: /* vexptefp */
292 for (i = 0; i < 4; ++i)
293 vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
294 break;
295 case 7: /* vlogefp */
296 for (i = 0; i < 4; ++i)
297 vrs[vd].u[i] = elog2(vrs[vb].u[i]);
298 break;
299 case 8: /* vrfin */
300 for (i = 0; i < 4; ++i)
301 vrs[vd].u[i] = rfin(vrs[vb].u[i]);
302 break;
303 case 9: /* vrfiz */
304 for (i = 0; i < 4; ++i)
305 vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
306 break;
307 case 10: /* vrfip */
308 for (i = 0; i < 4; ++i) {
309 u32 x = vrs[vb].u[i];
310 x = (x & 0x80000000)? rfiz(x): rfii(x);
311 vrs[vd].u[i] = x;
312 }
313 break;
314 case 11: /* vrfim */
315 for (i = 0; i < 4; ++i) {
316 u32 x = vrs[vb].u[i];
317 x = (x & 0x80000000)? rfii(x): rfiz(x);
318 vrs[vd].u[i] = x;
319 }
320 break;
321 case 14: /* vctuxs */
322 for (i = 0; i < 4; ++i)
323 vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
324 &current->thread.vscr.u[3]);
325 break;
326 case 15: /* vctsxs */
327 for (i = 0; i < 4; ++i)
328 vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
329 &current->thread.vscr.u[3]);
330 break;
331 default:
332 return -EINVAL;
333 }
334 break;
335 case 46: /* vmaddfp */
336 vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
337 break;
338 case 47: /* vnmsubfp */
339 vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
340 break;
341 default:
342 return -EINVAL;
343 }
344
345 return 0;
346}
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 0306510bc4ff..022f220e772f 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -1,3 +1,4 @@
1#include <asm/page.h>
1#include <asm-generic/vmlinux.lds.h> 2#include <asm-generic/vmlinux.lds.h>
2 3
3OUTPUT_ARCH(powerpc:common64) 4OUTPUT_ARCH(powerpc:common64)
@@ -17,7 +18,7 @@ SECTIONS
17 LOCK_TEXT 18 LOCK_TEXT
18 KPROBES_TEXT 19 KPROBES_TEXT
19 *(.fixup) 20 *(.fixup)
20 . = ALIGN(4096); 21 . = ALIGN(PAGE_SIZE);
21 _etext = .; 22 _etext = .;
22 } 23 }
23 24
@@ -43,7 +44,7 @@ SECTIONS
43 44
44 45
45 /* will be freed after init */ 46 /* will be freed after init */
46 . = ALIGN(4096); 47 . = ALIGN(PAGE_SIZE);
47 __init_begin = .; 48 __init_begin = .;
48 49
49 .init.text : { 50 .init.text : {
@@ -83,7 +84,7 @@ SECTIONS
83 84
84 SECURITY_INIT 85 SECURITY_INIT
85 86
86 . = ALIGN(4096); 87 . = ALIGN(PAGE_SIZE);
87 .init.ramfs : { 88 .init.ramfs : {
88 __initramfs_start = .; 89 __initramfs_start = .;
89 *(.init.ramfs) 90 *(.init.ramfs)
@@ -96,18 +97,22 @@ SECTIONS
96 __per_cpu_end = .; 97 __per_cpu_end = .;
97 } 98 }
98 99
100 . = ALIGN(PAGE_SIZE);
99 . = ALIGN(16384); 101 . = ALIGN(16384);
100 __init_end = .; 102 __init_end = .;
101 /* freed after init ends here */ 103 /* freed after init ends here */
102 104
103 105
104 /* Read/write sections */ 106 /* Read/write sections */
107 . = ALIGN(PAGE_SIZE);
105 . = ALIGN(16384); 108 . = ALIGN(16384);
109 _sdata = .;
106 /* The initial task and kernel stack */ 110 /* The initial task and kernel stack */
107 .data.init_task : { 111 .data.init_task : {
108 *(.data.init_task) 112 *(.data.init_task)
109 } 113 }
110 114
115 . = ALIGN(PAGE_SIZE);
111 .data.page_aligned : { 116 .data.page_aligned : {
112 *(.data.page_aligned) 117 *(.data.page_aligned)
113 } 118 }
@@ -129,18 +134,18 @@ SECTIONS
129 __toc_start = .; 134 __toc_start = .;
130 *(.got) 135 *(.got)
131 *(.toc) 136 *(.toc)
132 . = ALIGN(4096); 137 . = ALIGN(PAGE_SIZE);
133 _edata = .; 138 _edata = .;
134 } 139 }
135 140
136 141
137 . = ALIGN(4096); 142 . = ALIGN(PAGE_SIZE);
138 .bss : { 143 .bss : {
139 __bss_start = .; 144 __bss_start = .;
140 *(.bss) 145 *(.bss)
141 __bss_stop = .; 146 __bss_stop = .;
142 } 147 }
143 148
144 . = ALIGN(4096); 149 . = ALIGN(PAGE_SIZE);
145 _end = . ; 150 _end = . ;
146} 151}
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
index 0b6e967de948..42d5295bf345 100644
--- a/arch/ppc64/lib/Makefile
+++ b/arch/ppc64/lib/Makefile
@@ -2,17 +2,4 @@
2# Makefile for ppc64-specific library files.. 2# Makefile for ppc64-specific library files..
3# 3#
4 4
5lib-y := checksum.o string.o strcase.o 5lib-y := string.o
6lib-y += copypage.o memcpy.o copyuser.o usercopy.o
7
8# Lock primitives are defined as no-ops in include/linux/spinlock.h
9# for non-SMP configs. Don't build the real versions.
10
11lib-$(CONFIG_SMP) += locks.o
12
13# e2a provides EBCDIC to ASCII conversions.
14ifdef CONFIG_PPC_ISERIES
15obj-y += e2a.o
16endif
17
18lib-$(CONFIG_DEBUG_KERNEL) += sstep.o
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
index 813587e5c2ec..e21a0038a4d6 100644
--- a/arch/ppc64/lib/string.S
+++ b/arch/ppc64/lib/string.S
@@ -65,112 +65,6 @@ _GLOBAL(strlen)
65 subf r3,r3,r4 65 subf r3,r3,r4
66 blr 66 blr
67 67
68_GLOBAL(memset)
69 neg r0,r3
70 rlwimi r4,r4,8,16,23
71 andi. r0,r0,7 /* # bytes to be 8-byte aligned */
72 rlwimi r4,r4,16,0,15
73 cmplw cr1,r5,r0 /* do we get that far? */
74 rldimi r4,r4,32,0
75 mtcrf 1,r0
76 mr r6,r3
77 blt cr1,8f
78 beq+ 3f /* if already 8-byte aligned */
79 subf r5,r0,r5
80 bf 31,1f
81 stb r4,0(r6)
82 addi r6,r6,1
831: bf 30,2f
84 sth r4,0(r6)
85 addi r6,r6,2
862: bf 29,3f
87 stw r4,0(r6)
88 addi r6,r6,4
893: srdi. r0,r5,6
90 clrldi r5,r5,58
91 mtctr r0
92 beq 5f
934: std r4,0(r6)
94 std r4,8(r6)
95 std r4,16(r6)
96 std r4,24(r6)
97 std r4,32(r6)
98 std r4,40(r6)
99 std r4,48(r6)
100 std r4,56(r6)
101 addi r6,r6,64
102 bdnz 4b
1035: srwi. r0,r5,3
104 clrlwi r5,r5,29
105 mtcrf 1,r0
106 beq 8f
107 bf 29,6f
108 std r4,0(r6)
109 std r4,8(r6)
110 std r4,16(r6)
111 std r4,24(r6)
112 addi r6,r6,32
1136: bf 30,7f
114 std r4,0(r6)
115 std r4,8(r6)
116 addi r6,r6,16
1177: bf 31,8f
118 std r4,0(r6)
119 addi r6,r6,8
1208: cmpwi r5,0
121 mtcrf 1,r5
122 beqlr+
123 bf 29,9f
124 stw r4,0(r6)
125 addi r6,r6,4
1269: bf 30,10f
127 sth r4,0(r6)
128 addi r6,r6,2
12910: bflr 31
130 stb r4,0(r6)
131 blr
132
133_GLOBAL(memmove)
134 cmplw 0,r3,r4
135 bgt .backwards_memcpy
136 b .memcpy
137
138_GLOBAL(backwards_memcpy)
139 rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
140 add r6,r3,r5
141 add r4,r4,r5
142 beq 2f
143 andi. r0,r6,3
144 mtctr r7
145 bne 5f
1461: lwz r7,-4(r4)
147 lwzu r8,-8(r4)
148 stw r7,-4(r6)
149 stwu r8,-8(r6)
150 bdnz 1b
151 andi. r5,r5,7
1522: cmplwi 0,r5,4
153 blt 3f
154 lwzu r0,-4(r4)
155 subi r5,r5,4
156 stwu r0,-4(r6)
1573: cmpwi 0,r5,0
158 beqlr
159 mtctr r5
1604: lbzu r0,-1(r4)
161 stbu r0,-1(r6)
162 bdnz 4b
163 blr
1645: mtctr r0
1656: lbzu r7,-1(r4)
166 stbu r7,-1(r6)
167 bdnz 6b
168 subf r5,r0,r5
169 rlwinm. r7,r5,32-3,3,31
170 beq 2b
171 mtctr r7
172 b 1b
173
174_GLOBAL(memcmp) 68_GLOBAL(memcmp)
175 cmpwi 0,r5,0 69 cmpwi 0,r5,0
176 ble- 2f 70 ble- 2f
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile
deleted file mode 100644
index 3695d00d347f..000000000000
--- a/arch/ppc64/mm/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1#
2# Makefile for the linux ppc-specific parts of the memory manager.
3#
4
5EXTRA_CFLAGS += -mno-minimal-toc
6
7obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \
8 slb_low.o slb.o stab.o mmap.o
9obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
10obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
11obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
deleted file mode 100644
index e2bd7776622f..000000000000
--- a/arch/ppc64/mm/init.c
+++ /dev/null
@@ -1,950 +0,0 @@
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * Dave Engebretsen <engebret@us.ibm.com>
14 * Rework for PPC64 port.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version.
20 *
21 */
22
23#include <linux/config.h>
24#include <linux/signal.h>
25#include <linux/sched.h>
26#include <linux/kernel.h>
27#include <linux/errno.h>
28#include <linux/string.h>
29#include <linux/types.h>
30#include <linux/mman.h>
31#include <linux/mm.h>
32#include <linux/swap.h>
33#include <linux/stddef.h>
34#include <linux/vmalloc.h>
35#include <linux/init.h>
36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h>
39#include <linux/idr.h>
40#include <linux/nodemask.h>
41#include <linux/module.h>
42
43#include <asm/pgalloc.h>
44#include <asm/page.h>
45#include <asm/prom.h>
46#include <asm/lmb.h>
47#include <asm/rtas.h>
48#include <asm/io.h>
49#include <asm/mmu_context.h>
50#include <asm/pgtable.h>
51#include <asm/mmu.h>
52#include <asm/uaccess.h>
53#include <asm/smp.h>
54#include <asm/machdep.h>
55#include <asm/tlb.h>
56#include <asm/eeh.h>
57#include <asm/processor.h>
58#include <asm/mmzone.h>
59#include <asm/cputable.h>
60#include <asm/ppcdebug.h>
61#include <asm/sections.h>
62#include <asm/system.h>
63#include <asm/iommu.h>
64#include <asm/abs_addr.h>
65#include <asm/vdso.h>
66#include <asm/imalloc.h>
67
68#if PGTABLE_RANGE > USER_VSID_RANGE
69#warning Limited user VSID range means pagetable space is wasted
70#endif
71
72#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
73#warning TASK_SIZE is smaller than it needs to be.
74#endif
75
76int mem_init_done;
77unsigned long ioremap_bot = IMALLOC_BASE;
78static unsigned long phbs_io_bot = PHBS_IO_BASE;
79
80extern pgd_t swapper_pg_dir[];
81extern struct task_struct *current_set[NR_CPUS];
82
83unsigned long klimit = (unsigned long)_end;
84
85unsigned long _SDR1=0;
86unsigned long _ASR=0;
87
88/* max amount of RAM to use */
89unsigned long __max_memory;
90
91/* info on what we think the IO hole is */
92unsigned long io_hole_start;
93unsigned long io_hole_size;
94
95void show_mem(void)
96{
97 unsigned long total = 0, reserved = 0;
98 unsigned long shared = 0, cached = 0;
99 struct page *page;
100 pg_data_t *pgdat;
101 unsigned long i;
102
103 printk("Mem-info:\n");
104 show_free_areas();
105 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
106 for_each_pgdat(pgdat) {
107 unsigned long flags;
108 pgdat_resize_lock(pgdat, &flags);
109 for (i = 0; i < pgdat->node_spanned_pages; i++) {
110 page = pgdat_page_nr(pgdat, i);
111 total++;
112 if (PageReserved(page))
113 reserved++;
114 else if (PageSwapCache(page))
115 cached++;
116 else if (page_count(page))
117 shared += page_count(page) - 1;
118 }
119 pgdat_resize_unlock(pgdat, &flags);
120 }
121 printk("%ld pages of RAM\n", total);
122 printk("%ld reserved pages\n", reserved);
123 printk("%ld pages shared\n", shared);
124 printk("%ld pages swap cached\n", cached);
125}
126
127#ifdef CONFIG_PPC_ISERIES
128
129void __iomem *ioremap(unsigned long addr, unsigned long size)
130{
131 return (void __iomem *)addr;
132}
133
134extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
135 unsigned long flags)
136{
137 return (void __iomem *)addr;
138}
139
140void iounmap(volatile void __iomem *addr)
141{
142 return;
143}
144
145#else
146
147/*
148 * map_io_page currently only called by __ioremap
149 * map_io_page adds an entry to the ioremap page table
150 * and adds an entry to the HPT, possibly bolting it
151 */
152static int map_io_page(unsigned long ea, unsigned long pa, int flags)
153{
154 pgd_t *pgdp;
155 pud_t *pudp;
156 pmd_t *pmdp;
157 pte_t *ptep;
158 unsigned long vsid;
159
160 if (mem_init_done) {
161 pgdp = pgd_offset_k(ea);
162 pudp = pud_alloc(&init_mm, pgdp, ea);
163 if (!pudp)
164 return -ENOMEM;
165 pmdp = pmd_alloc(&init_mm, pudp, ea);
166 if (!pmdp)
167 return -ENOMEM;
168 ptep = pte_alloc_kernel(pmdp, ea);
169 if (!ptep)
170 return -ENOMEM;
171 set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
172 __pgprot(flags)));
173 } else {
174 unsigned long va, vpn, hash, hpteg;
175
176 /*
177 * If the mm subsystem is not fully up, we cannot create a
178 * linux page table entry for this mapping. Simply bolt an
179 * entry in the hardware page table.
180 */
181 vsid = get_kernel_vsid(ea);
182 va = (vsid << 28) | (ea & 0xFFFFFFF);
183 vpn = va >> PAGE_SHIFT;
184
185 hash = hpt_hash(vpn, 0);
186
187 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
188
189 /* Panic if a pte grpup is full */
190 if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
191 HPTE_V_BOLTED,
192 _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
193 == -1) {
194 panic("map_io_page: could not insert mapping");
195 }
196 }
197 return 0;
198}
199
200
201static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
202 unsigned long ea, unsigned long size,
203 unsigned long flags)
204{
205 unsigned long i;
206
207 if ((flags & _PAGE_PRESENT) == 0)
208 flags |= pgprot_val(PAGE_KERNEL);
209
210 for (i = 0; i < size; i += PAGE_SIZE)
211 if (map_io_page(ea+i, pa+i, flags))
212 return NULL;
213
214 return (void __iomem *) (ea + (addr & ~PAGE_MASK));
215}
216
217
218void __iomem *
219ioremap(unsigned long addr, unsigned long size)
220{
221 return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
222}
223
/*
 * Map the physical range containing [addr, addr+size) into kernel
 * virtual space with the given page flags and return the virtual
 * address corresponding to 'addr' (sub-page offset preserved).
 * Returns NULL on failure.
 */
224void __iomem * __ioremap(unsigned long addr, unsigned long size,
225			unsigned long flags)
226{
227	unsigned long pa, ea;
228	void __iomem *ret;
229
230	/*
231	 * Choose an address to map it to.
232	 * Once the imalloc system is running, we use it.
233	 * Before that, we map using addresses going
234	 * up from ioremap_bot. imalloc will use
235	 * the addresses from ioremap_bot through
236	 * IMALLOC_END
237	 *
238	 */
	/* page-align; size is grown to cover addr's sub-page offset */
239	pa = addr & PAGE_MASK;
240	size = PAGE_ALIGN(addr + size) - pa;
241
242	if (size == 0)
243		return NULL;
244
245	if (mem_init_done) {
246		struct vm_struct *area;
247		area = im_get_free_area(size);
248		if (area == NULL)
249			return NULL;
250		ea = (unsigned long)(area->addr);
251		ret = __ioremap_com(addr, pa, ea, size, flags);
		/* release the imalloc reservation if the mapping failed */
252		if (!ret)
253			im_free(area->addr);
254	} else {
		/* early boot: carve from the watermark, bump only on success */
255		ea = ioremap_bot;
256		ret = __ioremap_com(addr, pa, ea, size, flags);
257		if (ret)
258			ioremap_bot += size;
259	}
260	return ret;
261}
262
263#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
264
/*
 * Map physical range pa at the caller-chosen virtual address ea.
 * All of pa, ea and size must be page aligned.  Returns 0 on success,
 * 1 on any failure (note: non-standard "1 means error" convention).
 */
265int __ioremap_explicit(unsigned long pa, unsigned long ea,
266		       unsigned long size, unsigned long flags)
267{
268	struct vm_struct *area;
269	void __iomem *ret;
270
271	/* For now, require page-aligned values for pa, ea, and size */
272	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
273	    !IS_PAGE_ALIGNED(size)) {
274		printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
275		return 1;
276	}
277
278	if (!mem_init_done) {
279		/* Two things to consider in this case:
280		 * 1) No records will be kept (imalloc, etc) that the region
281		 *    has been remapped
282		 * 2) It won't be easy to iounmap() the region later (because
283		 *    of 1)
284		 */
285		;
286	} else {
		/* reserve (or validate) the requested region with imalloc */
287		area = im_get_area(ea, size,
288			IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
289		if (area == NULL) {
290			/* Expected when PHB-dlpar is in play */
291			return 1;
292		}
293		if (ea != (unsigned long) area->addr) {
294			printk(KERN_ERR "unexpected addr return from "
295			       "im_get_area\n");
296			return 1;
297		}
298	}
299
	/* pa is page aligned, so passing it as 'addr' adds no offset */
300	ret = __ioremap_com(pa, pa, ea, size, flags);
301	if (ret == NULL) {
302		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
303		return 1;
304	}
305	if (ret != (void *) ea) {
306		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
307		return 1;
308	}
309
310	return 0;
311}
312
313/*
314 * Unmap an IO region and remove it from imalloc'd list.
315 * Access to IO memory should be serialized by driver.
316 * This code is modeled after vmalloc code - unmap_vm_area()
317 *
318 * XXX what about calls before mem_init_done (ie python_countermeasures())
319 */
320void iounmap(volatile void __iomem *token)
321{
322 void *addr;
323
324 if (!mem_init_done)
325 return;
326
327 addr = (void *) ((unsigned long __force) token & PAGE_MASK);
328
329 im_free(addr);
330}
331
332static int iounmap_subset_regions(unsigned long addr, unsigned long size)
333{
334 struct vm_struct *area;
335
336 /* Check whether subsets of this region exist */
337 area = im_get_area(addr, size, IM_REGION_SUPERSET);
338 if (area == NULL)
339 return 1;
340
341 while (area) {
342 iounmap((void __iomem *) area->addr);
343 area = im_get_area(addr, size,
344 IM_REGION_SUPERSET);
345 }
346
347 return 0;
348}
349
/*
 * Unmap a region previously mapped with __ioremap_explicit().
 * Returns 0 on success, 1 if no matching region (or subset regions)
 * could be found.
 */
350int iounmap_explicit(volatile void __iomem *start, unsigned long size)
351{
352	struct vm_struct *area;
353	unsigned long addr;
354	int rc;
355
356	addr = (unsigned long __force) start & PAGE_MASK;
357
358	/* Verify that the region either exists or is a subset of an existing
359	 * region. In the latter case, split the parent region to create
360	 * the exact region
361	 */
362	area = im_get_area(addr, size,
363			    IM_REGION_EXISTS | IM_REGION_SUBSET);
364	if (area == NULL) {
365		/* Determine whether subset regions exist. If so, unmap */
366		rc = iounmap_subset_regions(addr, size);
367		if (rc) {
368			printk(KERN_ERR
369			       "%s() cannot unmap nonexistent range 0x%lx\n",
370 				__FUNCTION__, addr);
371			return 1;
372		}
373	} else {
374		iounmap((void __iomem *) area->addr);
375	}
376	/*
377	 * FIXME! This can't be right:
378	iounmap(area->addr);
379	 * Maybe it should be "iounmap(area);"
380	 */
381	return 0;
382}
383
384#endif
385
386EXPORT_SYMBOL(ioremap);
387EXPORT_SYMBOL(__ioremap);
388EXPORT_SYMBOL(iounmap);
389
390void free_initmem(void)
391{
392 unsigned long addr;
393
394 addr = (unsigned long)__init_begin;
395 for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
396 memset((void *)addr, 0xcc, PAGE_SIZE);
397 ClearPageReserved(virt_to_page(addr));
398 set_page_count(virt_to_page(addr), 1);
399 free_page(addr);
400 totalram_pages++;
401 }
402 printk ("Freeing unused kernel memory: %luk freed\n",
403 ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
404}
405
406#ifdef CONFIG_BLK_DEV_INITRD
407void free_initrd_mem(unsigned long start, unsigned long end)
408{
409 if (start < end)
410 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
411 for (; start < end; start += PAGE_SIZE) {
412 ClearPageReserved(virt_to_page(start));
413 set_page_count(virt_to_page(start), 1);
414 free_page(start);
415 totalram_pages++;
416 }
417}
418#endif
419
420static DEFINE_SPINLOCK(mmu_context_lock);
421static DEFINE_IDR(mmu_context_idr);
422
/*
 * Allocate an MMU context id for a new mm via the IDR.  Ids start at 1
 * (0 is reserved) and are bounded by MAX_CONTEXT.  Returns 0 on
 * success, -ENOMEM if no id is available, or another idr error code.
 */
423int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
424{
425	int index;
426	int err;
427
428again:
	/* preallocate idr memory outside the lock; may sleep (GFP_KERNEL) */
429	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
430		return -ENOMEM;
431
432	spin_lock(&mmu_context_lock);
433	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
434	spin_unlock(&mmu_context_lock);
435
	/* -EAGAIN: preallocated memory was consumed by a racing caller */
436	if (err == -EAGAIN)
437		goto again;
438	else if (err)
439		return err;
440
	/* id space exhausted: give the over-range id back */
441	if (index > MAX_CONTEXT) {
442		idr_remove(&mmu_context_idr, index);
443		return -ENOMEM;
444	}
445
446	mm->context.id = index;
447
448	return 0;
449}
450
451void destroy_context(struct mm_struct *mm)
452{
453 spin_lock(&mmu_context_lock);
454 idr_remove(&mmu_context_idr, mm->context.id);
455 spin_unlock(&mmu_context_lock);
456
457 mm->context.id = NO_CONTEXT;
458}
459
460/*
461 * Do very early mm setup.
462 */
463void __init mm_init_ppc64(void)
464{
465#ifndef CONFIG_PPC_ISERIES
466	unsigned long i;
467#endif
468
469	ppc64_boot_msg(0x100, "MM Init");
470
471	/* This is the story of the IO hole... please, keep seated,
472	 * unfortunately, we are out of oxygen masks at the moment.
473	 * So we need some rough way to tell where your big IO hole
474	 * is. On pmac, it's between 2G and 4G, on POWER3, it's around
475	 * that area as well, on POWER4 we don't have one, etc...
476	 * We need that as a "hint" when sizing the TCE table on POWER3
477	 * So far, the simplest way that seems to work well enough for us is
478	 * to just assume that the first discontinuity in our physical
479	 * RAM layout is the IO hole. That may not be correct in the future
480	 * (and isn't on iSeries but then we don't care ;)
481	 */
482
483#ifndef CONFIG_PPC_ISERIES
	/* first gap between consecutive LMB regions is taken as the hole */
484	for (i = 1; i < lmb.memory.cnt; i++) {
485		unsigned long base, prevbase, prevsize;
486
487		prevbase = lmb.memory.region[i-1].base;
488		prevsize = lmb.memory.region[i-1].size;
489		base = lmb.memory.region[i].base;
490		if (base > (prevbase + prevsize)) {
491			io_hole_start = prevbase + prevsize;
492			io_hole_size = base - (prevbase + prevsize);
493			break;
494		}
495	}
496#endif /* CONFIG_PPC_ISERIES */
497	if (io_hole_start)
498		printk("IO Hole assumed to be %lx -> %lx\n",
499		       io_hole_start, io_hole_start + io_hole_size - 1);
500
501	ppc64_boot_msg(0x100, "MM Init Done");
502}
503
504/*
505 * This is called by /dev/mem to know if a given address has to
506 * be mapped non-cacheable or not
507 */
508int page_is_ram(unsigned long pfn)
509{
510 int i;
511 unsigned long paddr = (pfn << PAGE_SHIFT);
512
513 for (i=0; i < lmb.memory.cnt; i++) {
514 unsigned long base;
515
516 base = lmb.memory.region[i].base;
517
518 if ((paddr >= base) &&
519 (paddr < (base + lmb.memory.region[i].size))) {
520 return 1;
521 }
522 }
523
524 return 0;
525}
526EXPORT_SYMBOL(page_is_ram);
527
528/*
529 * Initialize the bootmem system and give it all the memory we
530 * have available.
531 */
532#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
 * Initialize the bootmem allocator: place its bitmap, seed it with all
 * LMB memory, then re-reserve everything LMB has already handed out
 * (including the bitmap itself, which lmb_alloc recorded as reserved).
 */
533void __init do_init_bootmem(void)
534{
535	unsigned long i;
536	unsigned long start, bootmap_pages;
537	unsigned long total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
538	int boot_mapsize;
539
540	/*
541	 * Find an area to use for the bootmem bitmap. Calculate the size of
542	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
543	 * Add 1 additional page in case the address isn't page-aligned.
544	 */
545	bootmap_pages = bootmem_bootmap_pages(total_pages);
546
547	start = lmb_alloc(bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
548	BUG_ON(!start);
549
550	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
551
552	max_pfn = max_low_pfn;
553
554	/* Add all physical memory to the bootmem map, mark each area
555	 * present.
556	 */
557	for (i=0; i < lmb.memory.cnt; i++)
558		free_bootmem(lmb.memory.region[i].base,
559			     lmb_size_bytes(&lmb.memory, i));
560
561	/* reserve the sections we're already using */
562	for (i=0; i < lmb.reserved.cnt; i++)
563		reserve_bootmem(lmb.reserved.region[i].base,
564				lmb_size_bytes(&lmb.reserved, i));
565
	/* register each memory range with the sparsemem/flatmem code */
566	for (i=0; i < lmb.memory.cnt; i++)
567		memory_present(0, lmb_start_pfn(&lmb.memory, i),
568			       lmb_end_pfn(&lmb.memory, i));
569}
570
571/*
572 * paging_init() sets up the page tables - in fact we've already done this.
573 */
574void __init paging_init(void)
575{
576 unsigned long zones_size[MAX_NR_ZONES];
577 unsigned long zholes_size[MAX_NR_ZONES];
578 unsigned long total_ram = lmb_phys_mem_size();
579 unsigned long top_of_ram = lmb_end_of_DRAM();
580
581 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
582 top_of_ram, total_ram);
583 printk(KERN_INFO "Memory hole size: %ldMB\n",
584 (top_of_ram - total_ram) >> 20);
585 /*
586 * All pages are DMA-able so we put them all in the DMA zone.
587 */
588 memset(zones_size, 0, sizeof(zones_size));
589 memset(zholes_size, 0, sizeof(zholes_size));
590
591 zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
592 zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
593
594 free_area_init_node(0, NODE_DATA(0), zones_size,
595 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
596}
597#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
598
599static struct kcore_list kcore_vmem;
600
/*
 * Register every LMB memory region plus the vmalloc area with
 * /proc/kcore so the running kernel's memory can be inspected.
 */
601static int __init setup_kcore(void)
602{
603	int i;
604
605	for (i=0; i < lmb.memory.cnt; i++) {
606		unsigned long base, size;
607		struct kcore_list *kcore_mem;
608
609		base = lmb.memory.region[i].base;
610		size = lmb.memory.region[i].size;
611
612		/* GFP_ATOMIC to avoid might_sleep warnings during boot */
613		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
614		if (!kcore_mem)
615			panic("mem_init: kmalloc failed\n");
616
		/* kclist_add keeps a reference; the allocation is never freed */
617		kclist_add(kcore_mem, __va(base), size);
618	}
619
620	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
621
622	return 0;
623}
624module_init(setup_kcore);
625
/*
 * Late memory setup: hand all bootmem pages to the buddy allocator,
 * count reserved pages, print the memory banner, and flip
 * mem_init_done so ioremap() switches over to imalloc.
 */
626void __init mem_init(void)
627{
628#ifdef CONFIG_NEED_MULTIPLE_NODES
629	int nid;
630#endif
631	pg_data_t *pgdat;
632	unsigned long i;
633	struct page *page;
634	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
635
636	num_physpages = max_low_pfn;	/* RAM is assumed contiguous */
637	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
638
	/* release bootmem per node (NUMA) or in one shot (flat) */
639#ifdef CONFIG_NEED_MULTIPLE_NODES
640        for_each_online_node(nid) {
641                if (NODE_DATA(nid)->node_spanned_pages != 0) {
642                        printk("freeing bootmem node %x\n", nid);
643                        totalram_pages +=
644                                free_all_bootmem_node(NODE_DATA(nid));
645                }
646        }
647#else
648	max_mapnr = num_physpages;
649	totalram_pages += free_all_bootmem();
650#endif
651
	/* count pages still marked reserved, for the banner below */
652	for_each_pgdat(pgdat) {
653		unsigned long flags;
654		pgdat_resize_lock(pgdat, &flags);
655		for (i = 0; i < pgdat->node_spanned_pages; i++) {
656			page = pgdat_page_nr(pgdat, i);
657			if (PageReserved(page))
658				reservedpages++;
659		}
660		pgdat_resize_unlock(pgdat, &flags);
661	}
662
663	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
664	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
665	datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
666	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
667
668	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
669	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
670		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
671		num_physpages << (PAGE_SHIFT-10),
672		codesize >> 10,
673		reservedpages << (PAGE_SHIFT-10),
674		datasize >> 10,
675		bsssize >> 10,
676		initsize >> 10);
677
	/* from here on ioremap() allocates from imalloc */
678	mem_init_done = 1;
679
680	/* Initialize the vDSO */
681	vdso_init();
682}
683
684/*
685 * This is called when a page has been modified by the kernel.
686 * It just marks the page as not i-cache clean. We do the i-cache
687 * flush later when the page is given to a user process, if necessary.
688 */
689void flush_dcache_page(struct page *page)
690{
691 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
692 return;
693 /* avoid an atomic op if possible */
694 if (test_bit(PG_arch_1, &page->flags))
695 clear_bit(PG_arch_1, &page->flags);
696}
697EXPORT_SYMBOL(flush_dcache_page);
698
699void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
700{
701 clear_page(page);
702
703 if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
704 return;
705 /*
706 * We shouldnt have to do this, but some versions of glibc
707 * require it (ld.so assumes zero filled pages are icache clean)
708 * - Anton
709 */
710
711 /* avoid an atomic op if possible */
712 if (test_bit(PG_arch_1, &pg->flags))
713 clear_bit(PG_arch_1, &pg->flags);
714}
715EXPORT_SYMBOL(clear_user_page);
716
/*
 * Copy a page on behalf of user space and mark the destination as
 * not i-cache clean (clears PG_arch_1 on pg).
 */
717void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
718		    struct page *pg)
719{
720	copy_page(vto, vfrom);
721
722	/*
723	 * We should be able to use the following optimisation, however
724	 * there are two problems.
725	 * Firstly a bug in some versions of binutils meant PLT sections
726	 * were not marked executable.
727	 * Secondly the first word in the GOT section is blrl, used
728	 * to establish the GOT address. Until recently the GOT was
729	 * not marked executable.
730	 * - Anton
731	 */
	/* NOTE(review): the disabled code below references 'vma', which is
	 * not a parameter of this function -- it would not compile if
	 * re-enabled as-is. */
732#if 0
733	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
734		return;
735#endif
736
737	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
738		return;
739
740	/* avoid an atomic op if possible */
741	if (test_bit(PG_arch_1, &pg->flags))
742		clear_bit(PG_arch_1, &pg->flags);
743}
744
745void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
746 unsigned long addr, int len)
747{
748 unsigned long maddr;
749
750 maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
751 flush_icache_range(maddr, maddr + len);
752}
753EXPORT_SYMBOL(flush_icache_user_range);
754
755/*
756 * This is called at the end of handling a user page fault, when the
757 * fault has been handled by updating a PTE in the linux page tables.
758 * We use it to preload an HPTE into the hash table corresponding to
759 * the updated linux PTE.
760 *
761 * This must always be called with the mm->page_table_lock held
762 */
763void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
764		      pte_t pte)
765{
766	unsigned long vsid;
767	void *pgdir;
768	pte_t *ptep;
769	int local = 0;
770	cpumask_t tmp;
771	unsigned long flags;
772
	/* handle i-cache coherency: flush and mark the page i-cache clean
	 * before it can be executed from, unless hardware makes that moot */
773	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
774	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
775		unsigned long pfn = pte_pfn(pte);
776		if (pfn_valid(pfn)) {
777			struct page *page = pfn_to_page(pfn);
778			if (!PageReserved(page)
779			    && !test_bit(PG_arch_1, &page->flags)) {
780				__flush_dcache_icache(page_address(page));
781				set_bit(PG_arch_1, &page->flags);
782			}
783		}
784	}
785
786	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
787	if (!pte_young(pte))
788		return;
789
790	pgdir = vma->vm_mm->pgd;
791	if (pgdir == NULL)
792		return;
793
794	ptep = find_linux_pte(pgdir, ea);
795	if (!ptep)
796		return;
797
798	vsid = get_vsid(vma->vm_mm->context.id, ea);
799
800	local_irq_save(flags);
	/* mm only ever ran on this CPU: a local-only flush/insert is safe */
801	tmp = cpumask_of_cpu(smp_processor_id());
802	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
803		local = 1;
804
	/* 0x300: access flag value passed to __hash_page -- presumably a
	 * DSISR-style access code; verify against __hash_page's contract */
805	__hash_page(ea, 0, vsid, ptep, 0x300, local);
806	local_irq_restore(flags);
807}
809
810void __iomem * reserve_phb_iospace(unsigned long size)
811{
812 void __iomem *virt_addr;
813
814 if (phbs_io_bot >= IMALLOC_BASE)
815 panic("reserve_phb_iospace(): phb io space overflow\n");
816
817 virt_addr = (void __iomem *) phbs_io_bot;
818 phbs_io_bot += size;
819
820 return virt_addr;
821}
822
/* Slab constructor for the page-table caches: zero the whole object. */
823static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
824{
825	memset(addr, 0, kmem_cache_size(cache));
826}
827
/*
 * Sizes and names for the two page-table slab caches: one shared by
 * PGD/PTE pages, one shared by PUD/PMD pages (the BUILD_BUG_ONs below
 * verify that those table sizes really do pair up).
 */
828static const int pgtable_cache_size[2] = {
829	PTE_TABLE_SIZE, PMD_TABLE_SIZE
830};
831static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
832	"pgd_pte_cache", "pud_pmd_cache",
833};
834
835kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
836
/* Create the page-table slab caches; panics on failure (boot-time). */
837void pgtable_cache_init(void)
838{
839	int i;
840
841	BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
842	BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
843	BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
844	BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
845
846	for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
847		int size = pgtable_cache_size[i];
848		const char *name = pgtable_cache_name[i];
849
		/* align == size so each object is naturally aligned */
850		pgtable_cache[i] = kmem_cache_create(name,
851						     size, size,
852						     SLAB_HWCACHE_ALIGN
853						     | SLAB_MUST_HWCACHE_ALIGN,
854						     zero_ctor,
855						     NULL);
856		if (! pgtable_cache[i])
857			panic("pgtable_cache_init(): could not create %s!\n",
858			      name);
859	}
860}
861
862pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
863 unsigned long size, pgprot_t vma_prot)
864{
865 if (ppc_md.phys_mem_access_prot)
866 return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);
867
868 if (!page_is_ram(addr >> PAGE_SHIFT))
869 vma_prot = __pgprot(pgprot_val(vma_prot)
870 | _PAGE_GUARDED | _PAGE_NO_CACHE);
871 return vma_prot;
872}
873EXPORT_SYMBOL(phys_mem_access_prot);
874
875#ifdef CONFIG_MEMORY_HOTPLUG
876
/*
 * Make a hot-added page available: clear its reserved bit, hand it to
 * the page allocator, and bump the global page counters.
 */
877void online_page(struct page *page)
878{
879	ClearPageReserved(page);
880	free_cold_page(page);
881	totalram_pages++;
882	num_physpages++;
883}
884
885/*
886 * This works only for the non-NUMA case. Later, we'll need a lookup
887 * to convert from real physical addresses to nid, that doesn't use
888 * pfn_to_nid().
889 */
890int __devinit add_memory(u64 start, u64 size)
891{
892 struct pglist_data *pgdata = NODE_DATA(0);
893 struct zone *zone;
894 unsigned long start_pfn = start >> PAGE_SHIFT;
895 unsigned long nr_pages = size >> PAGE_SHIFT;
896
897 /* this should work for most non-highmem platforms */
898 zone = pgdata->node_zones;
899
900 return __add_pages(zone, start_pfn, nr_pages);
901
902 return 0;
903}
904
905/*
906 * First pass at this code will check to determine if the remove
907 * request is within the RMO. Do not allow removal within the RMO.
908 */
909int __devinit remove_memory(u64 start, u64 size)
910{
911 struct zone *zone;
912 unsigned long start_pfn, end_pfn, nr_pages;
913
914 start_pfn = start >> PAGE_SHIFT;
915 nr_pages = size >> PAGE_SHIFT;
916 end_pfn = start_pfn + nr_pages;
917
918 printk("%s(): Attempting to remove memoy in range "
919 "%lx to %lx\n", __func__, start, start+size);
920 /*
921 * check for range within RMO
922 */
923 zone = page_zone(pfn_to_page(start_pfn));
924
925 printk("%s(): memory will be removed from "
926 "the %s zone\n", __func__, zone->name);
927
928 /*
929 * not handling removing memory ranges that
930 * overlap multiple zones yet
931 */
932 if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
933 goto overlap;
934
935 /* make sure it is NOT in RMO */
936 if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
937 printk("%s(): range to be removed must NOT be in RMO!\n",
938 __func__);
939 goto in_rmo;
940 }
941
942 return __remove_pages(zone, start_pfn, nr_pages);
943
944overlap:
945 printk("%s(): memory range to be removed overlaps "
946 "multiple zones!!!\n", __func__);
947in_rmo:
948 return -1;
949}
950#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/ppc64/oprofile/Kconfig b/arch/ppc64/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/ppc64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/ppc64/oprofile/Makefile b/arch/ppc64/oprofile/Makefile
deleted file mode 100644
index 162dbf06c142..000000000000
--- a/arch/ppc64/oprofile/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1obj-$(CONFIG_OPROFILE) += oprofile.o
2
3DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
4 oprof.o cpu_buffer.o buffer_sync.o \
5 event_buffer.o oprofile_files.o \
6 oprofilefs.o oprofile_stats.o \
7 timer_int.o )
8
9oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
diff --git a/arch/ppc64/xmon/Makefile b/arch/ppc64/xmon/Makefile
deleted file mode 100644
index fb21a7088d3e..000000000000
--- a/arch/ppc64/xmon/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
1# Makefile for xmon
2
3EXTRA_CFLAGS += -mno-minimal-toc
4
5obj-y := start.o xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
diff --git a/arch/ppc64/xmon/setjmp.S b/arch/ppc64/xmon/setjmp.S
deleted file mode 100644
index 30ee643d557c..000000000000
--- a/arch/ppc64/xmon/setjmp.S
+++ /dev/null
@@ -1,73 +0,0 @@
1/*
2 * Copyright (C) 1996 Paul Mackerras.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * NOTE: assert(sizeof(buf) > 184)
10 */
11#include <asm/processor.h>
12#include <asm/ppc_asm.h>
13
14_GLOBAL(xmon_setjmp)
15 mflr r0
16 std r0,0(r3)
17 std r1,8(r3)
18 std r2,16(r3)
19 mfcr r0
20 std r0,24(r3)
21 std r13,32(r3)
22 std r14,40(r3)
23 std r15,48(r3)
24 std r16,56(r3)
25 std r17,64(r3)
26 std r18,72(r3)
27 std r19,80(r3)
28 std r20,88(r3)
29 std r21,96(r3)
30 std r22,104(r3)
31 std r23,112(r3)
32 std r24,120(r3)
33 std r25,128(r3)
34 std r26,136(r3)
35 std r27,144(r3)
36 std r28,152(r3)
37 std r29,160(r3)
38 std r30,168(r3)
39 std r31,176(r3)
40 li r3,0
41 blr
42
43_GLOBAL(xmon_longjmp)
44 cmpdi r4,0
45 bne 1f
46 li r4,1
471: ld r13,32(r3)
48 ld r14,40(r3)
49 ld r15,48(r3)
50 ld r16,56(r3)
51 ld r17,64(r3)
52 ld r18,72(r3)
53 ld r19,80(r3)
54 ld r20,88(r3)
55 ld r21,96(r3)
56 ld r22,104(r3)
57 ld r23,112(r3)
58 ld r24,120(r3)
59 ld r25,128(r3)
60 ld r26,136(r3)
61 ld r27,144(r3)
62 ld r28,152(r3)
63 ld r29,160(r3)
64 ld r30,168(r3)
65 ld r31,176(r3)
66 ld r0,24(r3)
67 mtcrf 56,r0
68 ld r0,0(r3)
69 ld r1,8(r3)
70 ld r2,16(r3)
71 mtlr r0
72 mr r3,r4
73 blr
diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c
index 24a1e9f069a7..6504c4e69986 100644
--- a/arch/s390/kernel/compat_ioctl.c
+++ b/arch/s390/kernel/compat_ioctl.c
@@ -18,6 +18,8 @@
18#include <asm/dasd.h> 18#include <asm/dasd.h>
19#include <asm/cmb.h> 19#include <asm/cmb.h>
20#include <asm/tape390.h> 20#include <asm/tape390.h>
21#include <asm/ccwdev.h>
22#include "../../../drivers/s390/char/raw3270.h"
21 23
22static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, 24static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
23 unsigned long arg, struct file *f) 25 unsigned long arg, struct file *f)
@@ -62,6 +64,13 @@ COMPATIBLE_IOCTL(BIODASDCMFENABLE)
62COMPATIBLE_IOCTL(BIODASDCMFDISABLE) 64COMPATIBLE_IOCTL(BIODASDCMFDISABLE)
63COMPATIBLE_IOCTL(BIODASDREADALLCMB) 65COMPATIBLE_IOCTL(BIODASDREADALLCMB)
64 66
67COMPATIBLE_IOCTL(TUBICMD)
68COMPATIBLE_IOCTL(TUBOCMD)
69COMPATIBLE_IOCTL(TUBGETI)
70COMPATIBLE_IOCTL(TUBGETO)
71COMPATIBLE_IOCTL(TUBSETMOD)
72COMPATIBLE_IOCTL(TUBGETMOD)
73
65COMPATIBLE_IOCTL(TAPE390_DISPLAY) 74COMPATIBLE_IOCTL(TAPE390_DISPLAY)
66 75
67/* s390 doesn't need handlers here */ 76/* s390 doesn't need handlers here */
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 55654b6e16dc..039354d72348 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -485,7 +485,9 @@ start:
485# 485#
486 .org 0x10000 486 .org 0x10000
487startup:basr %r13,0 # get base 487startup:basr %r13,0 # get base
488.LPG1: lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 488.LPG1: l %r1, .Lget_ipl_device_addr-.LPG1(%r13)
489 basr %r14, %r1
490 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
489 la %r12,_pstart-.LPG1(%r13) # pointer to parameter area 491 la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
490 # move IPL device to lowcore 492 # move IPL device to lowcore
491 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) 493 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
@@ -560,6 +562,9 @@ startup:basr %r13,0 # get base
560 mr %r2,%r1 # mem size in bytes in %r3 562 mr %r2,%r1 # mem size in bytes in %r3
561 b .Lfchunk-.LPG1(%r13) 563 b .Lfchunk-.LPG1(%r13)
562 564
565 .align 4
566.Lget_ipl_device_addr:
567 .long .Lget_ipl_device
563.Lpmask: 568.Lpmask:
564 .byte 0 569 .byte 0
565.align 8 570.align 8
@@ -755,6 +760,63 @@ _pstart:
755 .global _pend 760 .global _pend
756_pend: 761_pend:
757 762
763.Lget_ipl_device:
764 basr %r12,0
765.LPG2: l %r1,0xb8 # get sid
766 sll %r1,15 # test if subchannel is enabled
767 srl %r1,31
768 ltr %r1,%r1
769 bz 0(%r14) # subchannel disabled
770 l %r1,0xb8
771 la %r5,.Lipl_schib-.LPG2(%r12)
772 stsch 0(%r5) # get schib of subchannel
773 bnz 0(%r14) # schib not available
774 tm 5(%r5),0x01 # devno valid?
775 bno 0(%r14)
776 la %r6,ipl_parameter_flags-.LPG2(%r12)
777 oi 3(%r6),0x01 # set flag
778 la %r2,ipl_devno-.LPG2(%r12)
779 mvc 0(2,%r2),6(%r5) # store devno
780 tm 4(%r5),0x80 # qdio capable device?
781 bno 0(%r14)
782 oi 3(%r6),0x02 # set flag
783
784 # copy ipl parameters
785
786 lhi %r0,4096
787 l %r2,20(%r0) # get address of parameter list
788 lhi %r3,IPL_PARMBLOCK_ORIGIN
789 st %r3,20(%r0)
790 lhi %r4,1
791 cr %r2,%r3 # start parameters < destination ?
792 jl 0f
793 lhi %r1,1 # copy direction is upwards
794 j 1f
7950: lhi %r1,-1 # copy direction is downwards
796 ar %r2,%r0
797 ar %r3,%r0
798 ar %r2,%r1
799 ar %r3,%r1
8001: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
801 ar %r3,%r1
802 ar %r2,%r1
803 sr %r0,%r4
804 jne 1b
805 b 0(%r14)
806
807 .align 4
808.Lipl_schib:
809 .rept 13
810 .long 0
811 .endr
812
813 .globl ipl_parameter_flags
814ipl_parameter_flags:
815 .long 0
816 .globl ipl_devno
817ipl_devno:
818 .word 0
819
758#ifdef CONFIG_SHARED_KERNEL 820#ifdef CONFIG_SHARED_KERNEL
759 .org 0x100000 821 .org 0x100000
760#endif 822#endif
@@ -764,11 +826,11 @@ _pend:
764# 826#
765 .globl _stext 827 .globl _stext
766_stext: basr %r13,0 # get base 828_stext: basr %r13,0 # get base
767.LPG2: 829.LPG3:
768# 830#
769# Setup stack 831# Setup stack
770# 832#
771 l %r15,.Linittu-.LPG2(%r13) 833 l %r15,.Linittu-.LPG3(%r13)
772 mvc __LC_CURRENT(4),__TI_task(%r15) 834 mvc __LC_CURRENT(4),__TI_task(%r15)
773 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE 835 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
774 st %r15,__LC_KERNEL_STACK # set end of kernel stack 836 st %r15,__LC_KERNEL_STACK # set end of kernel stack
@@ -782,8 +844,8 @@ _stext: basr %r13,0 # get base
782 lctl %c0,%c15,0(%r15) 844 lctl %c0,%c15,0(%r15)
783 845
784# 846#
785 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess 847 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
786 l %r14,.Lstart-.LPG2(%r13) 848 l %r14,.Lstart-.LPG3(%r13)
787 basr %r14,%r14 # call start_kernel 849 basr %r14,%r14 # call start_kernel
788# 850#
789# We returned from start_kernel ?!? PANIK 851# We returned from start_kernel ?!? PANIK
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index c9ff0404c875..193aafa72f54 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -484,6 +484,8 @@ start:
484startup:basr %r13,0 # get base 484startup:basr %r13,0 # get base
485.LPG1: sll %r13,1 # remove high order bit 485.LPG1: sll %r13,1 # remove high order bit
486 srl %r13,1 486 srl %r13,1
487 l %r1,.Lget_ipl_device_addr-.LPG1(%r13)
488 basr %r14,%r1
487 lhi %r1,1 # mode 1 = esame 489 lhi %r1,1 # mode 1 = esame
488 slr %r0,%r0 # set cpuid to zero 490 slr %r0,%r0 # set cpuid to zero
489 sigp %r1,%r0,0x12 # switch to esame mode 491 sigp %r1,%r0,0x12 # switch to esame mode
@@ -556,6 +558,9 @@ startup:basr %r13,0 # get base
556 mlgr %r2,%r1 # mem size in bytes in %r3 558 mlgr %r2,%r1 # mem size in bytes in %r3
557 b .Lfchunk-.LPG1(%r13) 559 b .Lfchunk-.LPG1(%r13)
558 560
561 .align 4
562.Lget_ipl_device_addr:
563 .long .Lget_ipl_device
559.Lpmask: 564.Lpmask:
560 .byte 0 565 .byte 0
561 .align 8 566 .align 8
@@ -746,6 +751,63 @@ _pstart:
746 .global _pend 751 .global _pend
747_pend: 752_pend:
748 753
754.Lget_ipl_device:
755 basr %r12,0
756.LPG2: l %r1,0xb8 # get sid
757 sll %r1,15 # test if subchannel is enabled
758 srl %r1,31
759 ltr %r1,%r1
760 bz 0(%r14) # subchannel disabled
761 l %r1,0xb8
762 la %r5,.Lipl_schib-.LPG2(%r12)
763 stsch 0(%r5) # get schib of subchannel
764 bnz 0(%r14) # schib not available
765 tm 5(%r5),0x01 # devno valid?
766 bno 0(%r14)
767 la %r6,ipl_parameter_flags-.LPG2(%r12)
768 oi 3(%r6),0x01 # set flag
769 la %r2,ipl_devno-.LPG2(%r12)
770 mvc 0(2,%r2),6(%r5) # store devno
771 tm 4(%r5),0x80 # qdio capable device?
772 bno 0(%r14)
773 oi 3(%r6),0x02 # set flag
774
775 # copy ipl parameters
776
777 lhi %r0,4096
778 l %r2,20(%r0) # get address of parameter list
779 lhi %r3,IPL_PARMBLOCK_ORIGIN
780 st %r3,20(%r0)
781 lhi %r4,1
782 cr %r2,%r3 # start parameters < destination ?
783 jl 0f
784 lhi %r1,1 # copy direction is upwards
785 j 1f
7860: lhi %r1,-1 # copy direction is downwards
787 ar %r2,%r0
788 ar %r3,%r0
789 ar %r2,%r1
790 ar %r3,%r1
7911: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
792 ar %r3,%r1
793 ar %r2,%r1
794 sr %r0,%r4
795 jne 1b
796 b 0(%r14)
797
798 .align 4
799.Lipl_schib:
800 .rept 13
801 .long 0
802 .endr
803
804 .globl ipl_parameter_flags
805ipl_parameter_flags:
806 .long 0
807 .globl ipl_devno
808ipl_devno:
809 .word 0
810
749#ifdef CONFIG_SHARED_KERNEL 811#ifdef CONFIG_SHARED_KERNEL
750 .org 0x100000 812 .org 0x100000
751#endif 813#endif
@@ -755,7 +817,7 @@ _pend:
755# 817#
756 .globl _stext 818 .globl _stext
757_stext: basr %r13,0 # get base 819_stext: basr %r13,0 # get base
758.LPG2: 820.LPG3:
759# 821#
760# Setup stack 822# Setup stack
761# 823#
@@ -774,7 +836,7 @@ _stext: basr %r13,0 # get base
774 lctlg %c0,%c15,0(%r15) 836 lctlg %c0,%c15,0(%r15)
775 837
776# 838#
777 lam 0,15,.Laregs-.LPG2(%r13) # load access regs needed by uaccess 839 lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
778 brasl %r14,start_kernel # go to C code 840 brasl %r14,start_kernel # go to C code
779# 841#
780# We returned from start_kernel ?!? PANIK 842# We returned from start_kernel ?!? PANIK
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5204778b8e5e..31e7b19348b7 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -36,6 +36,7 @@
36#include <linux/console.h> 36#include <linux/console.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/kernel_stat.h> 38#include <linux/kernel_stat.h>
39#include <linux/device.h>
39 40
40#include <asm/uaccess.h> 41#include <asm/uaccess.h>
41#include <asm/system.h> 42#include <asm/system.h>
@@ -685,3 +686,188 @@ struct seq_operations cpuinfo_op = {
685 .show = show_cpuinfo, 686 .show = show_cpuinfo,
686}; 687};
687 688
689#define DEFINE_IPL_ATTR(_name, _format, _value) \
690static ssize_t ipl_##_name##_show(struct subsystem *subsys, \
691 char *page) \
692{ \
693 return sprintf(page, _format, _value); \
694} \
695static struct subsys_attribute ipl_##_name##_attr = \
696 __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);
697
698DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
699 IPL_PARMBLOCK_START->fcp.wwpn);
700DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
701 IPL_PARMBLOCK_START->fcp.lun);
702DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
703 IPL_PARMBLOCK_START->fcp.bootprog);
704DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
705 IPL_PARMBLOCK_START->fcp.br_lba);
706
707enum ipl_type_type {
708 ipl_type_unknown,
709 ipl_type_ccw,
710 ipl_type_fcp,
711};
712
713static enum ipl_type_type
714get_ipl_type(void)
715{
716 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
717
718 if (!IPL_DEVNO_VALID)
719 return ipl_type_unknown;
720 if (!IPL_PARMBLOCK_VALID)
721 return ipl_type_ccw;
722 if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
723 return ipl_type_unknown;
724 if (ipl->fcp.pbt != IPL_TYPE_FCP)
725 return ipl_type_unknown;
726 return ipl_type_fcp;
727}
728
729static ssize_t
730ipl_type_show(struct subsystem *subsys, char *page)
731{
732 switch (get_ipl_type()) {
733 case ipl_type_ccw:
734 return sprintf(page, "ccw\n");
735 case ipl_type_fcp:
736 return sprintf(page, "fcp\n");
737 default:
738 return sprintf(page, "unknown\n");
739 }
740}
741
742static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);
743
744static ssize_t
745ipl_device_show(struct subsystem *subsys, char *page)
746{
747 struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
748
749 switch (get_ipl_type()) {
750 case ipl_type_ccw:
751 return sprintf(page, "0.0.%04x\n", ipl_devno);
752 case ipl_type_fcp:
753 return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
754 default:
755 return 0;
756 }
757}
758
759static struct subsys_attribute ipl_device_attr =
760 __ATTR(device, S_IRUGO, ipl_device_show, NULL);
761
762static struct attribute *ipl_fcp_attrs[] = {
763 &ipl_type_attr.attr,
764 &ipl_device_attr.attr,
765 &ipl_wwpn_attr.attr,
766 &ipl_lun_attr.attr,
767 &ipl_bootprog_attr.attr,
768 &ipl_br_lba_attr.attr,
769 NULL,
770};
771
772static struct attribute_group ipl_fcp_attr_group = {
773 .attrs = ipl_fcp_attrs,
774};
775
776static struct attribute *ipl_ccw_attrs[] = {
777 &ipl_type_attr.attr,
778 &ipl_device_attr.attr,
779 NULL,
780};
781
782static struct attribute_group ipl_ccw_attr_group = {
783 .attrs = ipl_ccw_attrs,
784};
785
786static struct attribute *ipl_unknown_attrs[] = {
787 &ipl_type_attr.attr,
788 NULL,
789};
790
791static struct attribute_group ipl_unknown_attr_group = {
792 .attrs = ipl_unknown_attrs,
793};
794
795static ssize_t
796ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
797{
798 unsigned int size = IPL_PARMBLOCK_SIZE;
799
800 if (off > size)
801 return 0;
802 if (off + count > size)
803 count = size - off;
804
805 memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
806 return count;
807}
808
809static struct bin_attribute ipl_parameter_attr = {
810 .attr = {
811 .name = "binary_parameter",
812 .mode = S_IRUGO,
813 .owner = THIS_MODULE,
814 },
815 .size = PAGE_SIZE,
816 .read = &ipl_parameter_read,
817};
818
819static ssize_t
820ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
821{
822 unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
823 void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;
824
825 if (off > size)
826 return 0;
827 if (off + count > size)
828 count = size - off;
829
830 memcpy(buf, scp_data + off, count);
831 return count;
832}
833
834static struct bin_attribute ipl_scp_data_attr = {
835 .attr = {
836 .name = "scp_data",
837 .mode = S_IRUGO,
838 .owner = THIS_MODULE,
839 },
840 .size = PAGE_SIZE,
841 .read = &ipl_scp_data_read,
842};
843
844static decl_subsys(ipl, NULL, NULL);
845
846static int __init
847ipl_device_sysfs_register(void) {
848 int rc;
849
850 rc = firmware_register(&ipl_subsys);
851 if (rc)
852 return rc;
853
854 switch (get_ipl_type()) {
855 case ipl_type_ccw:
856 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group);
857 break;
858 case ipl_type_fcp:
859 sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
860 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
861 &ipl_parameter_attr);
862 sysfs_create_bin_file(&ipl_subsys.kset.kobj,
863 &ipl_scp_data_attr);
864 break;
865 default:
866 sysfs_create_group(&ipl_subsys.kset.kobj,
867 &ipl_unknown_attr_group);
868 break;
869 }
870 return 0;
871}
872
873__initcall(ipl_device_sysfs_register);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 2fd75da15495..9a1d95894f3d 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -49,10 +49,6 @@
49 49
50#define TICK_SIZE tick 50#define TICK_SIZE tick
51 51
52u64 jiffies_64 = INITIAL_JIFFIES;
53
54EXPORT_SYMBOL(jiffies_64);
55
56static ext_int_info_t ext_int_info_cc; 52static ext_int_info_t ext_int_info_cc;
57static u64 init_timer_cc; 53static u64 init_timer_cc;
58static u64 jiffies_timer_cc; 54static u64 jiffies_timer_cc;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index fa0726507b3d..22a895ecb7a4 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -24,7 +24,6 @@
24#include <asm/s390_ext.h> 24#include <asm/s390_ext.h>
25#include <asm/timer.h> 25#include <asm/timer.h>
26 26
27#define VTIMER_MAGIC (TIMER_MAGIC + 1)
28static ext_int_info_t ext_int_info_timer; 27static ext_int_info_t ext_int_info_timer;
29DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); 28DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
30 29
@@ -277,20 +276,12 @@ static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
277 276
278void init_virt_timer(struct vtimer_list *timer) 277void init_virt_timer(struct vtimer_list *timer)
279{ 278{
280 timer->magic = VTIMER_MAGIC;
281 timer->function = NULL; 279 timer->function = NULL;
282 INIT_LIST_HEAD(&timer->entry); 280 INIT_LIST_HEAD(&timer->entry);
283 spin_lock_init(&timer->lock); 281 spin_lock_init(&timer->lock);
284} 282}
285EXPORT_SYMBOL(init_virt_timer); 283EXPORT_SYMBOL(init_virt_timer);
286 284
287static inline int check_vtimer(struct vtimer_list *timer)
288{
289 if (timer->magic != VTIMER_MAGIC)
290 return -EINVAL;
291 return 0;
292}
293
294static inline int vtimer_pending(struct vtimer_list *timer) 285static inline int vtimer_pending(struct vtimer_list *timer)
295{ 286{
296 return (!list_empty(&timer->entry)); 287 return (!list_empty(&timer->entry));
@@ -346,7 +337,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
346 337
347static inline int prepare_vtimer(struct vtimer_list *timer) 338static inline int prepare_vtimer(struct vtimer_list *timer)
348{ 339{
349 if (check_vtimer(timer) || !timer->function) { 340 if (!timer->function) {
350 printk("add_virt_timer: uninitialized timer\n"); 341 printk("add_virt_timer: uninitialized timer\n");
351 return -EINVAL; 342 return -EINVAL;
352 } 343 }
@@ -414,7 +405,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
414 unsigned long flags; 405 unsigned long flags;
415 int cpu; 406 int cpu;
416 407
417 if (check_vtimer(timer) || !timer->function) { 408 if (!timer->function) {
418 printk("mod_virt_timer: uninitialized timer\n"); 409 printk("mod_virt_timer: uninitialized timer\n");
419 return -EINVAL; 410 return -EINVAL;
420 } 411 }
@@ -481,11 +472,6 @@ int del_virt_timer(struct vtimer_list *timer)
481 unsigned long flags; 472 unsigned long flags;
482 struct vtimer_queue *vt_list; 473 struct vtimer_queue *vt_list;
483 474
484 if (check_vtimer(timer)) {
485 printk("del_virt_timer: timer not initialized\n");
486 return -EINVAL;
487 }
488
489 /* check if timer is pending */ 475 /* check if timer is pending */
490 if (!vtimer_pending(timer)) 476 if (!vtimer_pending(timer))
491 return 0; 477 return 0;
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 71a6d4e7809f..6e3b58bd8795 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sysdev.h> 14#include <linux/sysdev.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/string.h>
16#include <asm/dma.h> 17#include <asm/dma.h>
17 18
18static struct sysdev_class dma_sysclass = { 19static struct sysdev_class dma_sysclass = {
diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c
index e0b384bef55f..47abf6e49dfb 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/arch/sh/kernel/cpufreq.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/sched.h> /* set_cpus_allowed() */
23 24
24#include <asm/processor.h> 25#include <asm/processor.h>
25#include <asm/watchdog.h> 26#include <asm/watchdog.h>
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index b28919b65682..1fbe5a428e31 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -80,7 +80,7 @@ void ptrace_disable(struct task_struct *child)
80 /* nothing to do.. */ 80 /* nothing to do.. */
81} 81}
82 82
83asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 83asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
84{ 84{
85 struct task_struct *child; 85 struct task_struct *child;
86 struct user * dummy = NULL; 86 struct user * dummy = NULL;
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 02ca69918d7c..671b876416bf 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -56,10 +56,6 @@ extern unsigned long wall_jiffies;
56#define TICK_SIZE (tick_nsec / 1000) 56#define TICK_SIZE (tick_nsec / 1000)
57DEFINE_SPINLOCK(tmu0_lock); 57DEFINE_SPINLOCK(tmu0_lock);
58 58
59u64 jiffies_64 = INITIAL_JIFFIES;
60
61EXPORT_SYMBOL(jiffies_64);
62
63/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want 59/* XXX: Can we initialize this in a routine somewhere? Dreamcast doesn't want
64 * these routines anywhere... */ 60 * these routines anywhere... */
65#ifdef CONFIG_SH_RTC 61#ifdef CONFIG_SH_RTC
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index fd2000956dae..71f2eec00b99 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -121,7 +121,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
121 return 0; 121 return 0;
122} 122}
123 123
124asmlinkage int sys_ptrace(long request, long pid, long addr, long data) 124asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
125{ 125{
126 struct task_struct *child; 126 struct task_struct *child;
127 extern void poke_real_address_q(unsigned long long addr, unsigned long long data); 127 extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 43e395a14f49..870fe5327e09 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -116,8 +116,6 @@
116 116
117extern unsigned long wall_jiffies; 117extern unsigned long wall_jiffies;
118 118
119u64 jiffies_64 = INITIAL_JIFFIES;
120
121static unsigned long tmu_base, rtc_base; 119static unsigned long tmu_base, rtc_base;
122unsigned long cprc_base; 120unsigned long cprc_base;
123 121
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 36a40697b8d6..25e31d5ec99b 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -497,8 +497,8 @@ static void pcic_map_pci_device(struct linux_pcic *pcic,
497 * CheerIO makes a similar conversion. 497 * CheerIO makes a similar conversion.
498 * See ebus.c for details. 498 * See ebus.c for details.
499 * 499 *
500 * Note that check_region()/request_region() 500 * Note that request_region()
501 * work for these devices. 501 * works for these devices.
502 * 502 *
503 * XXX Neat trick, but it's a *bad* idea 503 * XXX Neat trick, but it's a *bad* idea
504 * to shit into regions like that. 504 * to shit into regions like that.
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 279a62627c10..24814d58f9e1 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -45,10 +45,6 @@
45 45
46extern unsigned long wall_jiffies; 46extern unsigned long wall_jiffies;
47 47
48u64 jiffies_64 = INITIAL_JIFFIES;
49
50EXPORT_SYMBOL(jiffies_64);
51
52DEFINE_SPINLOCK(rtc_lock); 48DEFINE_SPINLOCK(rtc_lock);
53enum sparc_clock_type sp_clock_typ; 49enum sparc_clock_type sp_clock_typ;
54DEFINE_SPINLOCK(mostek_lock); 50DEFINE_SPINLOCK(mostek_lock);
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index 43fc3173d480..e6a00325075a 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -475,9 +475,6 @@ IOCTL_TABLE_START
475#include <linux/compat_ioctl.h> 475#include <linux/compat_ioctl.h>
476#define DECLARES 476#define DECLARES
477#include "compat_ioctl.c" 477#include "compat_ioctl.c"
478COMPATIBLE_IOCTL(TIOCSTART)
479COMPATIBLE_IOCTL(TIOCSTOP)
480COMPATIBLE_IOCTL(TIOCSLTC)
481COMPATIBLE_IOCTL(FBIOGTYPE) 478COMPATIBLE_IOCTL(FBIOGTYPE)
482COMPATIBLE_IOCTL(FBIOSATTR) 479COMPATIBLE_IOCTL(FBIOSATTR)
483COMPATIBLE_IOCTL(FBIOGATTR) 480COMPATIBLE_IOCTL(FBIOGATTR)
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 3f08a32f51a1..38c5525087a2 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -55,10 +55,6 @@ unsigned long ds1287_regs = 0UL;
55 55
56extern unsigned long wall_jiffies; 56extern unsigned long wall_jiffies;
57 57
58u64 jiffies_64 = INITIAL_JIFFIES;
59
60EXPORT_SYMBOL(jiffies_64);
61
62static void __iomem *mstk48t08_regs; 58static void __iomem *mstk48t08_regs;
63static void __iomem *mstk48t59_regs; 59static void __iomem *mstk48t59_regs;
64 60
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 684e1f8b2755..cd06ed7d842d 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -27,10 +27,6 @@ config UID16
27 bool 27 bool
28 default y 28 default y
29 29
30config RWSEM_GENERIC_SPINLOCK
31 bool
32 default y
33
34config GENERIC_CALIBRATE_DELAY 30config GENERIC_CALIBRATE_DELAY
35 bool 31 bool
36 default y 32 default y
@@ -40,6 +36,12 @@ config IRQ_RELEASE_METHOD
40 bool 36 bool
41 default y 37 default y
42 38
39menu "Host processor type and features"
40
41source "arch/i386/Kconfig.cpu"
42
43endmenu
44
43menu "UML-specific options" 45menu "UML-specific options"
44 46
45config MODE_TT 47config MODE_TT
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index bd35e59419c8..aae19bc4b06a 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -6,6 +6,11 @@ config 64BIT
6 bool 6 bool
7 default y 7 default y
8 8
9#XXX: this is so in the underlying arch, but it's wrong!!!
10config RWSEM_GENERIC_SPINLOCK
11 bool
12 default y
13
9config SEMAPHORE_SLEEPERS 14config SEMAPHORE_SLEEPERS
10 bool 15 bool
11 default y 16 default y
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 2ee8a2858117..aef7c50f8e13 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -29,6 +29,12 @@ endif
29 29
30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH) 30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
31 31
32ifneq ($(CONFIG_GPROF),y) 32# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
33ARCH_CFLAGS += -DUM_FASTCALL 33include $(srctree)/arch/i386/Makefile.cpu
34endif 34
35# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
36cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
37
38CFLAGS += $(cflags-y)
39USER_CFLAGS += $(cflags-y)
40
diff --git a/arch/um/include/sysdep-i386/syscalls.h b/arch/um/include/sysdep-i386/syscalls.h
index a0d5b74d3731..57bd79efbee3 100644
--- a/arch/um/include/sysdep-i386/syscalls.h
+++ b/arch/um/include/sysdep-i386/syscalls.h
@@ -11,7 +11,6 @@ typedef long syscall_handler_t(struct pt_regs);
11/* Not declared on x86, incompatible declarations on x86_64, so these have 11/* Not declared on x86, incompatible declarations on x86_64, so these have
12 * to go here rather than in sys_call_table.c 12 * to go here rather than in sys_call_table.c
13 */ 13 */
14extern syscall_handler_t sys_ptrace;
15extern syscall_handler_t sys_rt_sigaction; 14extern syscall_handler_t sys_rt_sigaction;
16 15
17extern syscall_handler_t old_mmap_i386; 16extern syscall_handler_t old_mmap_i386;
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 4e08f7545d63..020ca79b8d33 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -22,10 +22,6 @@
22#include "mode.h" 22#include "mode.h"
23#include "os.h" 23#include "os.h"
24 24
25u64 jiffies_64 = INITIAL_JIFFIES;
26
27EXPORT_SYMBOL(jiffies_64);
28
29int hz(void) 25int hz(void)
30{ 26{
31 return(HZ); 27 return(HZ);
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index 4726b87f5e5a..d6077ff47d22 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -113,7 +113,7 @@ static int set_single_step (struct task_struct *t, int val)
113 return 1; 113 return 1;
114} 114}
115 115
116int sys_ptrace(long request, long pid, long addr, long data) 116long sys_ptrace(long request, long pid, long addr, long data)
117{ 117{
118 struct task_struct *child; 118 struct task_struct *child;
119 int rval; 119 int rval;
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
index ea3fd8844ff0..c1e85c2aef65 100644
--- a/arch/v850/kernel/time.c
+++ b/arch/v850/kernel/time.c
@@ -26,10 +26,6 @@
26 26
27#include "mach.h" 27#include "mach.h"
28 28
29u64 jiffies_64 = INITIAL_JIFFIES;
30
31EXPORT_SYMBOL(jiffies_64);
32
33#define TICK_SIZE (tick_nsec / 1000) 29#define TICK_SIZE (tick_nsec / 1000)
34 30
35/* 31/*
diff --git a/arch/x86_64/ia32/ia32_ioctl.c b/arch/x86_64/ia32/ia32_ioctl.c
index 419758f19ca4..4ba0e293d5e5 100644
--- a/arch/x86_64/ia32/ia32_ioctl.c
+++ b/arch/x86_64/ia32/ia32_ioctl.c
@@ -12,40 +12,11 @@
12#define INCLUDES 12#define INCLUDES
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include "compat_ioctl.c" 14#include "compat_ioctl.c"
15#include <asm/mtrr.h>
16#include <asm/ia32.h> 15#include <asm/ia32.h>
17 16
18#define CODE 17#define CODE
19#include "compat_ioctl.c" 18#include "compat_ioctl.c"
20 19
21#ifndef TIOCGDEV
22#define TIOCGDEV _IOR('T',0x32, unsigned int)
23#endif
24static int tiocgdev(unsigned fd, unsigned cmd, unsigned int __user *ptr)
25{
26
27 struct file *file;
28 struct tty_struct *real_tty;
29 int fput_needed, ret;
30
31 file = fget_light(fd, &fput_needed);
32 if (!file)
33 return -EBADF;
34
35 ret = -EINVAL;
36 if (file->f_op->ioctl != tty_ioctl)
37 goto out;
38 real_tty = (struct tty_struct *)file->private_data;
39 if (!real_tty)
40 goto out;
41
42 ret = put_user(new_encode_dev(tty_devnum(real_tty)), ptr);
43
44out:
45 fput_light(file, fput_needed);
46 return ret;
47}
48
49#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */ 20#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */
50#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */ 21#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */
51#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */ 22#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */
@@ -85,90 +56,6 @@ static int rtc32_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
85 return sys_ioctl(fd,cmd,arg); 56 return sys_ioctl(fd,cmd,arg);
86} 57}
87 58
88/* /proc/mtrr ioctls */
89
90
91struct mtrr_sentry32
92{
93 compat_ulong_t base; /* Base address */
94 compat_uint_t size; /* Size of region */
95 compat_uint_t type; /* Type of region */
96};
97
98struct mtrr_gentry32
99{
100 compat_ulong_t regnum; /* Register number */
101 compat_uint_t base; /* Base address */
102 compat_uint_t size; /* Size of region */
103 compat_uint_t type; /* Type of region */
104};
105
106#define MTRR_IOCTL_BASE 'M'
107
108#define MTRRIOC32_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry32)
109#define MTRRIOC32_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry32)
110#define MTRRIOC32_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry32)
111#define MTRRIOC32_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry32)
112#define MTRRIOC32_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry32)
113#define MTRRIOC32_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry32)
114#define MTRRIOC32_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry32)
115#define MTRRIOC32_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry32)
116#define MTRRIOC32_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry32)
117#define MTRRIOC32_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
118
119
120static int mtrr_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg)
121{
122 struct mtrr_gentry g;
123 struct mtrr_sentry s;
124 int get = 0, err = 0;
125 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)arg;
126 mm_segment_t oldfs = get_fs();
127
128 switch (cmd) {
129#define SET(x) case MTRRIOC32_ ## x ## _ENTRY: cmd = MTRRIOC_ ## x ## _ENTRY; break
130#define GET(x) case MTRRIOC32_ ## x ## _ENTRY: cmd = MTRRIOC_ ## x ## _ENTRY; get=1; break
131 SET(ADD);
132 SET(SET);
133 SET(DEL);
134 GET(GET);
135 SET(KILL);
136 SET(ADD_PAGE);
137 SET(SET_PAGE);
138 SET(DEL_PAGE);
139 GET(GET_PAGE);
140 SET(KILL_PAGE);
141 }
142
143 if (get) {
144 err = get_user(g.regnum, &g32->regnum);
145 err |= get_user(g.base, &g32->base);
146 err |= get_user(g.size, &g32->size);
147 err |= get_user(g.type, &g32->type);
148
149 arg = (unsigned long)&g;
150 } else {
151 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)arg;
152 err = get_user(s.base, &s32->base);
153 err |= get_user(s.size, &s32->size);
154 err |= get_user(s.type, &s32->type);
155
156 arg = (unsigned long)&s;
157 }
158 if (err) return err;
159
160 set_fs(KERNEL_DS);
161 err = sys_ioctl(fd, cmd, arg);
162 set_fs(oldfs);
163
164 if (!err && get) {
165 err = put_user(g.base, &g32->base);
166 err |= put_user(g.size, &g32->size);
167 err |= put_user(g.regnum, &g32->regnum);
168 err |= put_user(g.type, &g32->type);
169 }
170 return err;
171}
172 59
173#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) }, 60#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) },
174#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl) 61#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
@@ -185,7 +72,6 @@ COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain *
185COMPATIBLE_IOCTL(FIOQSIZE) 72COMPATIBLE_IOCTL(FIOQSIZE)
186 73
187/* And these ioctls need translation */ 74/* And these ioctls need translation */
188HANDLE_IOCTL(TIOCGDEV, tiocgdev)
189/* realtime device */ 75/* realtime device */
190HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl) 76HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl)
191HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl) 77HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl)
@@ -193,17 +79,6 @@ HANDLE_IOCTL(RTC_IRQP_SET32, rtc32_ioctl)
193HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl) 79HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl)
194HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl) 80HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl)
195/* take care of sizeof(sizeof()) breakage */ 81/* take care of sizeof(sizeof()) breakage */
196/* mtrr */
197HANDLE_IOCTL(MTRRIOC32_ADD_ENTRY, mtrr_ioctl32)
198HANDLE_IOCTL(MTRRIOC32_SET_ENTRY, mtrr_ioctl32)
199HANDLE_IOCTL(MTRRIOC32_DEL_ENTRY, mtrr_ioctl32)
200HANDLE_IOCTL(MTRRIOC32_GET_ENTRY, mtrr_ioctl32)
201HANDLE_IOCTL(MTRRIOC32_KILL_ENTRY, mtrr_ioctl32)
202HANDLE_IOCTL(MTRRIOC32_ADD_PAGE_ENTRY, mtrr_ioctl32)
203HANDLE_IOCTL(MTRRIOC32_SET_PAGE_ENTRY, mtrr_ioctl32)
204HANDLE_IOCTL(MTRRIOC32_DEL_PAGE_ENTRY, mtrr_ioctl32)
205HANDLE_IOCTL(MTRRIOC32_GET_PAGE_ENTRY, mtrr_ioctl32)
206HANDLE_IOCTL(MTRRIOC32_KILL_PAGE_ENTRY, mtrr_ioctl32)
207}; 82};
208 83
209int ioctl_table_size = ARRAY_SIZE(ioctl_start); 84int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index b2a238b5a17e..c6c9791d77c1 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -494,7 +494,7 @@ void invalidate_interrupt7(void);
494void thermal_interrupt(void); 494void thermal_interrupt(void);
495void i8254_timer_resume(void); 495void i8254_timer_resume(void);
496 496
497static void setup_timer(void) 497static void setup_timer_hardware(void)
498{ 498{
499 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */ 499 outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
500 udelay(10); 500 udelay(10);
@@ -505,13 +505,13 @@ static void setup_timer(void)
505 505
506static int timer_resume(struct sys_device *dev) 506static int timer_resume(struct sys_device *dev)
507{ 507{
508 setup_timer(); 508 setup_timer_hardware();
509 return 0; 509 return 0;
510} 510}
511 511
512void i8254_timer_resume(void) 512void i8254_timer_resume(void)
513{ 513{
514 setup_timer(); 514 setup_timer_hardware();
515} 515}
516 516
517static struct sysdev_class timer_sysclass = { 517static struct sysdev_class timer_sysclass = {
@@ -594,7 +594,7 @@ void __init init_IRQ(void)
594 * Set the clock to HZ Hz, we already have a valid 594 * Set the clock to HZ Hz, we already have a valid
595 * vector now: 595 * vector now:
596 */ 596 */
597 setup_timer(); 597 setup_timer_hardware();
598 598
599 if (!acpi_ioapic) 599 if (!acpi_ioapic)
600 setup_irq(2, &irq2); 600 setup_irq(2, &irq2);
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index cb28df14ff6f..da0bc3e7bdf5 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -1213,7 +1213,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1213 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1213 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1214 1214
1215 /* Intel-defined (#2) */ 1215 /* Intel-defined (#2) */
1216 "pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est", 1216 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
1217 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL, 1217 "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
1218 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1218 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1219 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 1219 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index f066c6ab3618..fd2bef780882 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -63,13 +63,12 @@ void save_processor_state(void)
63 __save_processor_state(&saved_context); 63 __save_processor_state(&saved_context);
64} 64}
65 65
66static void 66static void do_fpu_end(void)
67do_fpu_end(void)
68{ 67{
69 /* restore FPU regs if necessary */ 68 /*
70 /* Do it out of line so that gcc does not move cr0 load to some stupid place */ 69 * Restore FPU regs if necessary
71 kernel_fpu_end(); 70 */
72 mxcsr_feature_mask_init(); 71 kernel_fpu_end();
73} 72}
74 73
75void __restore_processor_state(struct saved_context *ctxt) 74void __restore_processor_state(struct saved_context *ctxt)
@@ -148,57 +147,7 @@ extern int restore_image(void);
148 147
149pgd_t *temp_level4_pgt; 148pgd_t *temp_level4_pgt;
150 149
151static void **pages; 150static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
152
153static inline void *__add_page(void)
154{
155 void **c;
156
157 c = (void **)get_usable_page(GFP_ATOMIC);
158 if (c) {
159 *c = pages;
160 pages = c;
161 }
162 return c;
163}
164
165static inline void *__next_page(void)
166{
167 void **c;
168
169 c = pages;
170 if (c) {
171 pages = *c;
172 *c = NULL;
173 }
174 return c;
175}
176
177/*
178 * Try to allocate as many usable pages as needed and daisy chain them.
179 * If one allocation fails, free the pages allocated so far
180 */
181static int alloc_usable_pages(unsigned long n)
182{
183 void *p;
184
185 pages = NULL;
186 do
187 if (!__add_page())
188 break;
189 while (--n);
190 if (n) {
191 p = __next_page();
192 while (p) {
193 free_page((unsigned long)p);
194 p = __next_page();
195 }
196 return -ENOMEM;
197 }
198 return 0;
199}
200
201static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
202{ 151{
203 long i, j; 152 long i, j;
204 153
@@ -212,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
212 if (paddr >= end) 161 if (paddr >= end)
213 break; 162 break;
214 163
215 pmd = (pmd_t *)__next_page(); 164 pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
165 if (!pmd)
166 return -ENOMEM;
216 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 167 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
217 for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { 168 for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
218 unsigned long pe; 169 unsigned long pe;
@@ -224,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
224 set_pmd(pmd, __pmd(pe)); 175 set_pmd(pmd, __pmd(pe));
225 } 176 }
226 } 177 }
178 return 0;
227} 179}
228 180
229static void set_up_temporary_mappings(void) 181static int set_up_temporary_mappings(void)
230{ 182{
231 unsigned long start, end, next; 183 unsigned long start, end, next;
184 int error;
232 185
233 temp_level4_pgt = (pgd_t *)__next_page(); 186 temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
187 if (!temp_level4_pgt)
188 return -ENOMEM;
234 189
235 /* It is safe to reuse the original kernel mapping */ 190 /* It is safe to reuse the original kernel mapping */
236 set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), 191 set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
@@ -241,29 +196,27 @@ static void set_up_temporary_mappings(void)
241 end = (unsigned long)pfn_to_kaddr(end_pfn); 196 end = (unsigned long)pfn_to_kaddr(end_pfn);
242 197
243 for (; start < end; start = next) { 198 for (; start < end; start = next) {
244 pud_t *pud = (pud_t *)__next_page(); 199 pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
200 if (!pud)
201 return -ENOMEM;
245 next = start + PGDIR_SIZE; 202 next = start + PGDIR_SIZE;
246 if (next > end) 203 if (next > end)
247 next = end; 204 next = end;
248 res_phys_pud_init(pud, __pa(start), __pa(next)); 205 if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
206 return error;
249 set_pgd(temp_level4_pgt + pgd_index(start), 207 set_pgd(temp_level4_pgt + pgd_index(start),
250 mk_kernel_pgd(__pa(pud))); 208 mk_kernel_pgd(__pa(pud)));
251 } 209 }
210 return 0;
252} 211}
253 212
254int swsusp_arch_resume(void) 213int swsusp_arch_resume(void)
255{ 214{
256 unsigned long n; 215 int error;
257 216
258 n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
259 n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
260 pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
261 if (alloc_usable_pages(n)) {
262 free_eaten_memory();
263 return -ENOMEM;
264 }
265 /* We have got enough memory and from now on we cannot recover */ 217 /* We have got enough memory and from now on we cannot recover */
266 set_up_temporary_mappings(); 218 if ((error = set_up_temporary_mappings()))
219 return error;
267 restore_image(); 220 restore_image();
268 return 0; 221 return 0;
269} 222}
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 703acde2a1a5..fdaddc4e5284 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -42,10 +42,6 @@
42#include <asm/apic.h> 42#include <asm/apic.h>
43#endif 43#endif
44 44
45u64 jiffies_64 = INITIAL_JIFFIES;
46
47EXPORT_SYMBOL(jiffies_64);
48
49#ifdef CONFIG_CPU_FREQ 45#ifdef CONFIG_CPU_FREQ
50static void cpufreq_delayed_get(void); 46static void cpufreq_delayed_get(void);
51#endif 47#endif
@@ -481,9 +477,9 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
481static unsigned int cyc2ns_scale; 477static unsigned int cyc2ns_scale;
482#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 478#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
483 479
484static inline void set_cyc2ns_scale(unsigned long cpu_mhz) 480static inline void set_cyc2ns_scale(unsigned long cpu_khz)
485{ 481{
486 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR)/cpu_mhz; 482 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
487} 483}
488 484
489static inline unsigned long long cycles_2_ns(unsigned long long cyc) 485static inline unsigned long long cycles_2_ns(unsigned long long cyc)
@@ -655,7 +651,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
655 vxtime.tsc_quot = (1000L << 32) / cpu_khz; 651 vxtime.tsc_quot = (1000L << 32) / cpu_khz;
656 } 652 }
657 653
658 set_cyc2ns_scale(cpu_khz_ref / 1000); 654 set_cyc2ns_scale(cpu_khz_ref);
659 655
660 return 0; 656 return 0;
661} 657}
@@ -939,7 +935,7 @@ void __init time_init(void)
939 rdtscll_sync(&vxtime.last_tsc); 935 rdtscll_sync(&vxtime.last_tsc);
940 setup_irq(0, &irq0); 936 setup_irq(0, &irq0);
941 937
942 set_cyc2ns_scale(cpu_khz / 1000); 938 set_cyc2ns_scale(cpu_khz);
943 939
944#ifndef CONFIG_SMP 940#ifndef CONFIG_SMP
945 time_init_gtod(); 941 time_init_gtod();
@@ -1093,6 +1089,7 @@ static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
1093static unsigned long PIE_count; 1089static unsigned long PIE_count;
1094 1090
1095static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */ 1091static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
1092static unsigned int hpet_t1_cmp; /* cached comparator register */
1096 1093
1097int is_hpet_enabled(void) 1094int is_hpet_enabled(void)
1098{ 1095{
@@ -1129,10 +1126,12 @@ int hpet_rtc_timer_init(void)
1129 cnt = hpet_readl(HPET_COUNTER); 1126 cnt = hpet_readl(HPET_COUNTER);
1130 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq); 1127 cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
1131 hpet_writel(cnt, HPET_T1_CMP); 1128 hpet_writel(cnt, HPET_T1_CMP);
1129 hpet_t1_cmp = cnt;
1132 local_irq_restore(flags); 1130 local_irq_restore(flags);
1133 1131
1134 cfg = hpet_readl(HPET_T1_CFG); 1132 cfg = hpet_readl(HPET_T1_CFG);
1135 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT; 1133 cfg &= ~HPET_TN_PERIODIC;
1134 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1136 hpet_writel(cfg, HPET_T1_CFG); 1135 hpet_writel(cfg, HPET_T1_CFG);
1137 1136
1138 return 1; 1137 return 1;
@@ -1142,8 +1141,12 @@ static void hpet_rtc_timer_reinit(void)
1142{ 1141{
1143 unsigned int cfg, cnt; 1142 unsigned int cfg, cnt;
1144 1143
1145 if (!(PIE_on | AIE_on | UIE_on)) 1144 if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
1145 cfg = hpet_readl(HPET_T1_CFG);
1146 cfg &= ~HPET_TN_ENABLE;
1147 hpet_writel(cfg, HPET_T1_CFG);
1146 return; 1148 return;
1149 }
1147 1150
1148 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ)) 1151 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1149 hpet_rtc_int_freq = PIE_freq; 1152 hpet_rtc_int_freq = PIE_freq;
@@ -1151,15 +1154,10 @@ static void hpet_rtc_timer_reinit(void)
1151 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ; 1154 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1152 1155
1153 /* It is more accurate to use the comparator value than current count.*/ 1156 /* It is more accurate to use the comparator value than current count.*/
1154 cnt = hpet_readl(HPET_T1_CMP); 1157 cnt = hpet_t1_cmp;
1155 cnt += hpet_tick*HZ/hpet_rtc_int_freq; 1158 cnt += hpet_tick*HZ/hpet_rtc_int_freq;
1156 hpet_writel(cnt, HPET_T1_CMP); 1159 hpet_writel(cnt, HPET_T1_CMP);
1157 1160 hpet_t1_cmp = cnt;
1158 cfg = hpet_readl(HPET_T1_CFG);
1159 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
1160 hpet_writel(cfg, HPET_T1_CFG);
1161
1162 return;
1163} 1161}
1164 1162
1165/* 1163/*
diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c
index 03674daabc66..a17930747f20 100644
--- a/arch/xtensa/kernel/platform.c
+++ b/arch/xtensa/kernel/platform.c
@@ -18,6 +18,7 @@
18#include <linux/time.h> 18#include <linux/time.h>
19#include <asm/platform.h> 19#include <asm/platform.h>
20#include <asm/timex.h> 20#include <asm/timex.h>
21#include <asm/param.h> /* HZ */
21 22
22#define _F(r,f,a,b) \ 23#define _F(r,f,a,b) \
23 r __platform_##f a b; \ 24 r __platform_##f a b; \
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 2659efdd4e99..14460743de07 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -45,7 +45,7 @@ void ptrace_disable(struct task_struct *child)
45 /* Nothing to do.. */ 45 /* Nothing to do.. */
46} 46}
47 47
48int sys_ptrace(long request, long pid, long addr, long data) 48long sys_ptrace(long request, long pid, long addr, long data)
49{ 49{
50 struct task_struct *child; 50 struct task_struct *child;
51 int ret = -EPERM; 51 int ret = -EPERM;
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 8e423d1335ce..cb6e38ed2b1d 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,9 +29,6 @@
29 29
30extern volatile unsigned long wall_jiffies; 30extern volatile unsigned long wall_jiffies;
31 31
32u64 jiffies_64 = INITIAL_JIFFIES;
33EXPORT_SYMBOL(jiffies_64);
34
35spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED; 32spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
36EXPORT_SYMBOL(rtc_lock); 33EXPORT_SYMBOL(rtc_lock);
37 34